diff --git a/sys/contrib/openzfs/.github/codeql-cpp.yml b/sys/contrib/openzfs/.github/codeql-cpp.yml
new file mode 100644
index 000000000000..88b8c6086025
--- /dev/null
+++ b/sys/contrib/openzfs/.github/codeql-cpp.yml
@@ -0,0 +1,4 @@
+name: "Custom CodeQL Analysis"
+
+queries:
+ - uses: ./.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
diff --git a/sys/contrib/openzfs/.github/codeql-python.yml b/sys/contrib/openzfs/.github/codeql-python.yml
new file mode 100644
index 000000000000..93cb4a435ed9
--- /dev/null
+++ b/sys/contrib/openzfs/.github/codeql-python.yml
@@ -0,0 +1,4 @@
+name: "Custom CodeQL Analysis"
+
+paths-ignore:
+ - tests
diff --git a/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql b/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
new file mode 100644
index 000000000000..eb4b7bd6299b
--- /dev/null
+++ b/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
@@ -0,0 +1,59 @@
+/**
+ * @name Deprecated function usage detection
+ * @description Detects functions whose usage is banned from the OpenZFS
+ * codebase due to QA concerns.
+ * @kind problem
+ * @severity error
+ * @id cpp/deprecated-function-usage
+*/
+
+import cpp
+
+predicate isDeprecatedFunction(Function f) {
+ f.getName() = "strtok" or
+ f.getName() = "__xpg_basename" or
+ f.getName() = "basename" or
+ f.getName() = "dirname" or
+ f.getName() = "bcopy" or
+ f.getName() = "bcmp" or
+ f.getName() = "bzero" or
+ f.getName() = "asctime" or
+ f.getName() = "asctime_r" or
+ f.getName() = "gmtime" or
+ f.getName() = "localtime" or
+ f.getName() = "strncpy"
+
+}
+
+string getReplacementMessage(Function f) {
+ if f.getName() = "strtok" then
+ result = "Use strtok_r(3) instead!"
+ else if f.getName() = "__xpg_basename" then
+ result = "basename(3) is underspecified. Use zfs_basename() instead!"
+ else if f.getName() = "basename" then
+ result = "basename(3) is underspecified. Use zfs_basename() instead!"
+ else if f.getName() = "dirname" then
+ result = "dirname(3) is underspecified. Use zfs_dirnamelen() instead!"
+ else if f.getName() = "bcopy" then
+ result = "bcopy(3) is deprecated. Use memcpy(3)/memmove(3) instead!"
+ else if f.getName() = "bcmp" then
+ result = "bcmp(3) is deprecated. Use memcmp(3) instead!"
+ else if f.getName() = "bzero" then
+ result = "bzero(3) is deprecated. Use memset(3) instead!"
+ else if f.getName() = "asctime" then
+ result = "Use strftime(3) instead!"
+ else if f.getName() = "asctime_r" then
+ result = "Use strftime(3) instead!"
+ else if f.getName() = "gmtime" then
+ result = "gmtime(3) isn't thread-safe. Use gmtime_r(3) instead!"
+ else if f.getName() = "localtime" then
+ result = "localtime(3) isn't thread-safe. Use localtime_r(3) instead!"
+ else
+ result = "strncpy(3) is deprecated. Use strlcpy(3) instead!"
+}
+
+from FunctionCall fc, Function f
+where
+ fc.getTarget() = f and
+ isDeprecatedFunction(f)
+select fc, getReplacementMessage(f)
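(Editorial aside: a minimal C fragment of the kind the query above is meant to catch, plus the suggested replacement; the file name and code are illustrative only and not part of this change.)
```c
/* flagged.c -- hypothetical example for deprecatedFunctionUsage.ql:
 * a call to strtok() would be reported as "Use strtok_r(3) instead!".
 * The reentrant strtok_r(3) form below is the suggested fix. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[] = "tank/home,tank/srv,tank/var";
	char *save, *tok;

	/* strtok(buf, ",") here would trip the CodeQL check */
	for (tok = strtok_r(buf, ",", &save); tok != NULL;
	    tok = strtok_r(NULL, ",", &save))
		(void) printf("%s\n", tok);
	return (0);
}
```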
diff --git a/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/qlpack.yml b/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/qlpack.yml
new file mode 100644
index 000000000000..cbe0f1cbe3c4
--- /dev/null
+++ b/sys/contrib/openzfs/.github/codeql/custom-queries/cpp/qlpack.yml
@@ -0,0 +1,4 @@
+name: openzfs-cpp-queries
+version: 0.0.0
+libraryPathDependencies: codeql-cpp
+suites: openzfs-cpp-suite
diff --git a/sys/contrib/openzfs/.github/workflows/README.md b/sys/contrib/openzfs/.github/workflows/README.md
index 8255dd210821..eef47dae3dc7 100644
--- a/sys/contrib/openzfs/.github/workflows/README.md
+++ b/sys/contrib/openzfs/.github/workflows/README.md
@@ -1,51 +1,61 @@
## How the tests are run
```mermaid
flowchart TB
subgraph CleanUp and Summary
- Part1-20.04-->CleanUp+nice+Summary
- Part2-20.04-->CleanUp+nice+Summary
- PartN-20.04-->CleanUp+nice+Summary
- Part1-22.04-->CleanUp+nice+Summary
- Part2-22.04-->CleanUp+nice+Summary
- PartN-22.04-->CleanUp+nice+Summary
+ CleanUp+Summary
end
subgraph Functional Tests
+ sanity-checks-20.04
+ zloop-checks-20.04
functional-testing-20.04-->Part1-20.04
functional-testing-20.04-->Part2-20.04
- functional-testing-20.04-->PartN-20.04
+ functional-testing-20.04-->Part3-20.04
+ functional-testing-20.04-->Part4-20.04
functional-testing-22.04-->Part1-22.04
functional-testing-22.04-->Part2-22.04
- functional-testing-22.04-->PartN-22.04
-end
-
-subgraph Sanity and zloop Testings
- sanity-checks-20.04-->functional-testing-20.04
- sanity-checks-22.04-->functional-testing-22.04
- zloop-checks-20.04-->functional
- zloop-checks-22.04-->functional
+ functional-testing-22.04-->Part3-22.04
+ functional-testing-22.04-->Part4-22.04
+ sanity-checks-22.04
+ zloop-checks-22.04
end
subgraph Code Checking + Building
+ Build-Ubuntu-20.04
codeql.yml
checkstyle.yml
+ Build-Ubuntu-22.04
+end
+
Build-Ubuntu-20.04-->sanity-checks-20.04
- Build-Ubuntu-22.04-->sanity-checks-22.04
Build-Ubuntu-20.04-->zloop-checks-20.04
+ Build-Ubuntu-20.04-->functional-testing-20.04
+ Build-Ubuntu-22.04-->sanity-checks-22.04
Build-Ubuntu-22.04-->zloop-checks-22.04
-end
+ Build-Ubuntu-22.04-->functional-testing-22.04
+
+ sanity-checks-20.04-->CleanUp+Summary
+ Part1-20.04-->CleanUp+Summary
+ Part2-20.04-->CleanUp+Summary
+ Part3-20.04-->CleanUp+Summary
+ Part4-20.04-->CleanUp+Summary
+ Part1-22.04-->CleanUp+Summary
+ Part2-22.04-->CleanUp+Summary
+ Part3-22.04-->CleanUp+Summary
+ Part4-22.04-->CleanUp+Summary
+ sanity-checks-22.04-->CleanUp+Summary
```
1) build zfs modules for Ubuntu 20.04 and 22.04 (~15m)
2) 2x zloop test (~10m) + 2x sanity test (~25m)
-3) functional testings in parts 1..5 (each ~1h)
+3) 4x functional tests in parts 1..4 (each ~1h)
4) cleanup and create summary
- content of summary depends on the results of the steps
When everything runs fine, the full run should be done in
about 2 hours.
The codeql.yml and checkstyle.yml are not part of this cycle.
diff --git a/sys/contrib/openzfs/.github/workflows/checkstyle.yaml b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
index b0fdc570d473..abcb358fc04f 100644
--- a/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
+++ b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
@@ -1,59 +1,59 @@
name: checkstyle
on:
push:
pull_request:
jobs:
checkstyle:
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install dependencies
run: |
# https://github.com/orgs/community/discussions/47863
sudo apt-mark hold grub-efi-amd64-signed
sudo apt-get update --fix-missing
sudo apt-get upgrade
sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt apt-get install -qq
sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/checkstyle-dependencies.txt apt-get install -qq
sudo python3 -m pip install --quiet flake8
sudo apt-get clean
# confirm that the tools are installed
# the build system doesn't fail when they are not
checkbashisms --version
cppcheck --version
flake8 --version
scanelf --version
shellcheck --version
- name: Prepare
run: |
./autogen.sh
./configure
make -j$(nproc) --no-print-directory --silent
- name: Checkstyle
run: |
make -j$(nproc) --no-print-directory --silent checkstyle
- name: Lint
run: |
make -j$(nproc) --no-print-directory --silent lint
- name: CheckABI
id: CheckABI
run: |
docker run -v $PWD:/source ghcr.io/openzfs/libabigail make -j$(nproc) --no-print-directory --silent checkabi
- name: StoreABI
if: failure() && steps.CheckABI.outcome == 'failure'
run: |
docker run -v $PWD:/source ghcr.io/openzfs/libabigail make -j$(nproc) --no-print-directory --silent storeabi
- name: Prepare artifacts
if: failure() && steps.CheckABI.outcome == 'failure'
run: |
find -name '*.abi' | tar -cf abi_files.tar -T -
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
if: failure() && steps.CheckABI.outcome == 'failure'
with:
name: New ABI files (use only if you're sure about interface changes)
path: abi_files.tar
diff --git a/sys/contrib/openzfs/.github/workflows/codeql.yml b/sys/contrib/openzfs/.github/workflows/codeql.yml
index 037f8aca0eaa..e015b2cb71d9 100644
--- a/sys/contrib/openzfs/.github/workflows/codeql.yml
+++ b/sys/contrib/openzfs/.github/workflows/codeql.yml
@@ -1,40 +1,41 @@
name: "CodeQL"
on:
push:
pull_request:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'cpp', 'python' ]
steps:
- name: Set make jobs
run: |
echo "MAKEFLAGS=-j$(nproc)" >> $GITHUB_ENV
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
+ config-file: .github/codeql-${{ matrix.language }}.yml
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/generate-summary.sh b/sys/contrib/openzfs/.github/workflows/scripts/generate-summary.sh
index cd5ea3421c94..b5d89208a5d8 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/generate-summary.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/generate-summary.sh
@@ -1,119 +1,119 @@
#!/usr/bin/env bash
# for runtime reasons we split functional testing into N parts
# - use a constant to check for missing tarfiles
FUNCTIONAL_PARTS="4"
ZTS_REPORT="tests/test-runner/bin/zts-report.py"
chmod +x $ZTS_REPORT
function output() {
echo -e $* >> Summary.md
}
function error() {
output ":bangbang: $* :bangbang:\n"
}
# this function generates the real summary
# - expects a logfile "log" in current directory
function generate() {
# we issued some error already
test ! -s log && return
# for overview and zts-report
cat log | grep '^Test' > list
# error details
awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }
/\[SKIP\]|\[PASS\]/{ show=0; } show' log > err
# summary of errors
if [ -s err ]; then
output "<pre>"
$ZTS_REPORT --no-maybes ./list >> Summary.md
output "</pre>"
# generate separate error logfile
ERRLOGS=$((ERRLOGS+1))
errfile="err-$ERRLOGS.md"
echo -e "\n## $headline (debugging)\n" >> $errfile
echo "<details><summary>Error Listing - with dmesg and dbgmsg</summary><pre>" >> $errfile
dd if=err bs=999k count=1 >> $errfile
echo "</pre></details>" >> $errfile
else
output "All tests passed :thumbsup:"
fi
output "<details><summary>Full Listing</summary><pre>"
cat list >> Summary.md
output "</pre></details>"
# remove tmp files
rm -f err list log
}
# check tarfiles and untar
function check_tarfile() {
if [ -f "$1" ]; then
tar xf "$1" || error "Tarfile $1 returns some error"
else
error "Tarfile $1 not found"
fi
}
# check logfile and concatenate test results
function check_logfile() {
if [ -f "$1" ]; then
cat "$1" >> log
else
error "Logfile $1 not found"
fi
}
# sanity
function summarize_s() {
headline="$1"
output "\n## $headline\n"
rm -rf testfiles
check_tarfile "$2/sanity.tar"
check_logfile "testfiles/log"
generate
}
# functional
function summarize_f() {
headline="$1"
output "\n## $headline\n"
rm -rf testfiles
for i in $(seq 1 $FUNCTIONAL_PARTS); do
- tarfile="$2/part$i.tar"
+ tarfile="$2-part$i/part$i.tar"
check_tarfile "$tarfile"
check_logfile "testfiles/log"
done
generate
}
# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
# [ ] can not show all error findings here
# [x] split files into smaller ones and create additional steps
ERRLOGS=0
if [ ! -f Summary/Summary.md ]; then
# first call, we do the default summary (~500k)
echo -n > Summary.md
summarize_s "Sanity Tests Ubuntu 20.04" Logs-20.04-sanity
summarize_s "Sanity Tests Ubuntu 22.04" Logs-22.04-sanity
summarize_f "Functional Tests Ubuntu 20.04" Logs-20.04-functional
summarize_f "Functional Tests Ubuntu 22.04" Logs-22.04-functional
cat Summary.md >> $GITHUB_STEP_SUMMARY
mkdir -p Summary
mv *.md Summary
else
# we get here when errors were returned in the first call
test -f Summary/err-$1.md && cat Summary/err-$1.md >> $GITHUB_STEP_SUMMARY
fi
exit 0
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/setup-dependencies.sh b/sys/contrib/openzfs/.github/workflows/scripts/setup-dependencies.sh
index 440d5e8e5ac9..b40f9290f914 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/setup-dependencies.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/setup-dependencies.sh
@@ -1,93 +1,88 @@
#!/usr/bin/env bash
set -eu
function prerun() {
echo "::group::Install build dependencies"
# remove snap things; update+upgrade will be faster afterwards
for x in lxd core20 snapd; do sudo snap remove $x; done
sudo apt-get purge snapd google-chrome-stable firefox
# https://github.com/orgs/community/discussions/47863
sudo apt-get remove grub-efi-amd64-bin grub-efi-amd64-signed shim-signed --allow-remove-essential
sudo apt-get update
sudo apt upgrade
sudo xargs --arg-file=.github/workflows/build-dependencies.txt apt-get install -qq
sudo apt-get clean
sudo dmesg -c > /var/tmp/dmesg-prerun
echo "::endgroup::"
}
function mod_build() {
echo "::group::Generate debian packages"
./autogen.sh
./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
make --no-print-directory --silent native-deb-utils native-deb-kmod
mv ../*.deb .
rm ./openzfs-zfs-dracut*.deb ./openzfs-zfs-dkms*.deb
echo "$ImageOS-$ImageVersion" > tests/ImageOS.txt
echo "::endgroup::"
}
function mod_install() {
# install the pre-built module only on the same runner image
MOD=`cat tests/ImageOS.txt`
if [ "$MOD" != "$ImageOS-$ImageVersion" ]; then
rm -f *.deb
mod_build
fi
echo "::group::Install and load modules"
# don't use kernel-shipped zfs modules
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo apt-get install --fix-missing ./*.deb
# Native Debian packages enable and start the services
# Stop zfs-zed daemon, as it may interfere with some ZTS test cases
sudo systemctl stop zfs-zed
sudo depmod -a
sudo modprobe zfs
sudo dmesg
sudo dmesg -c > /var/tmp/dmesg-module-load
echo "::endgroup::"
echo "::group::Report CPU information"
lscpu
cat /proc/spl/kstat/zfs/chksum_bench
echo "::endgroup::"
- echo "::group::Reclaim and report disk space"
- # remove 4GiB of images
- sudo systemd-run docker system prune --force --all --volumes
+ echo "::group::Optimize storage for ZFS testings"
+ # remove swap and umount fast storage
+ # 89GiB -> rootfs + bootfs with ~80MB/s -> don't care
+ # 64GiB -> /mnt with 420MB/s -> new testing ssd
+ sudo swapoff -a
- # remove unused software
- sudo systemd-run --wait rm -rf \
- "$AGENT_TOOLSDIRECTORY" \
- /opt/* \
- /usr/local/* \
- /usr/share/az* \
- /usr/share/dotnet \
- /usr/share/gradle* \
- /usr/share/miniconda \
- /usr/share/swift \
- /var/lib/gems \
- /var/lib/mysql \
- /var/lib/snapd
-
- # trim the cleaned space
- sudo fstrim /
+ # this one is fast and mounted @ /mnt
+ # -> we reformat with ext4 + move it to /var/tmp
+ DEV="/dev/disk/azure/resource-part1"
+ sudo umount /mnt
+ sudo mkfs.ext4 -O ^has_journal -F $DEV
+ sudo mount -o noatime,barrier=0 $DEV /var/tmp
+ sudo chmod 1777 /var/tmp
# disk usage afterwards
- df -h /
+ sudo df -h /
+ sudo df -h /var/tmp
+ sudo fstrim -a
echo "::endgroup::"
}
case "$1" in
build)
prerun
mod_build
;;
tests)
prerun
mod_install
;;
esac
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-linux-tests.yml b/sys/contrib/openzfs/.github/workflows/zfs-linux-tests.yml
index c4fe930d092c..753f3cd0214e 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-linux-tests.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-linux-tests.yml
@@ -1,124 +1,124 @@
name: zfs-linux-tests
on:
workflow_call:
inputs:
os:
description: 'The ubuntu version: 20.04 or 22.04'
required: true
type: string
jobs:
zloop:
runs-on: ubuntu-${{ inputs.os }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
with:
name: modules-${{ inputs.os }}
- name: Install modules
run: |
tar xzf modules-${{ inputs.os }}.tgz
.github/workflows/scripts/setup-dependencies.sh tests
- name: Tests
timeout-minutes: 30
run: |
sudo mkdir -p /var/tmp/zloop
# run for 10 minutes or at most 2 iterations for a maximum runner
# time of 20 minutes.
sudo /usr/share/zfs/zloop.sh -t 600 -I 2 -l -m1 -- -T 120 -P 60
- name: Prepare artifacts
if: failure()
run: |
sudo chmod +r -R /var/tmp/zloop/
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
if: failure()
with:
name: Zpool-logs-${{ inputs.os }}
path: |
/var/tmp/zloop/*/
!/var/tmp/zloop/*/vdev/
retention-days: 14
if-no-files-found: ignore
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
if: failure()
with:
name: Zpool-files-${{ inputs.os }}
path: |
/var/tmp/zloop/*/vdev/
retention-days: 14
if-no-files-found: ignore
sanity:
runs-on: ubuntu-${{ inputs.os }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
with:
name: modules-${{ inputs.os }}
- name: Install modules
run: |
tar xzf modules-${{ inputs.os }}.tgz
.github/workflows/scripts/setup-dependencies.sh tests
- name: Tests
timeout-minutes: 60
shell: bash
run: |
set -o pipefail
/usr/share/zfs/zfs-tests.sh -vKR -s 3G -r sanity | scripts/zfs-tests-color.sh
- name: Prepare artifacts
if: success() || failure()
run: |
RESPATH="/var/tmp/test_results"
mv -f $RESPATH/current $RESPATH/testfiles
tar cf $RESPATH/sanity.tar -h -C $RESPATH testfiles
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
if: success() || failure()
with:
name: Logs-${{ inputs.os }}-sanity
path: /var/tmp/test_results/sanity.tar
if-no-files-found: ignore
functional:
runs-on: ubuntu-${{ inputs.os }}
strategy:
fail-fast: false
matrix:
tests: [ part1, part2, part3, part4 ]
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
with:
name: modules-${{ inputs.os }}
- name: Install modules
run: |
tar xzf modules-${{ inputs.os }}.tgz
.github/workflows/scripts/setup-dependencies.sh tests
- name: Setup tests
run: |
.github/workflows/scripts/setup-functional.sh ${{ matrix.tests }} >> $GITHUB_ENV
- name: Tests
timeout-minutes: 120
shell: bash
run: |
set -o pipefail
/usr/share/zfs/zfs-tests.sh -vKR -s 3G -T ${{ env.TODO }} | scripts/zfs-tests-color.sh
- name: Prepare artifacts
if: success() || failure()
run: |
RESPATH="/var/tmp/test_results"
mv -f $RESPATH/current $RESPATH/testfiles
tar cf $RESPATH/${{ matrix.tests }}.tar -h -C $RESPATH testfiles
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
if: success() || failure()
with:
- name: Logs-${{ inputs.os }}-functional
+ name: Logs-${{ inputs.os }}-functional-${{ matrix.tests }}
path: /var/tmp/test_results/${{ matrix.tests }}.tar
if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-linux.yml b/sys/contrib/openzfs/.github/workflows/zfs-linux.yml
index be3908deb948..e6b705c86055 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-linux.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-linux.yml
@@ -1,64 +1,64 @@
name: zfs-linux
on:
push:
pull_request:
jobs:
build:
name: Build
strategy:
fail-fast: false
matrix:
os: [20.04, 22.04]
runs-on: ubuntu-${{ matrix.os }}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Build modules
run: .github/workflows/scripts/setup-dependencies.sh build
- name: Prepare modules upload
run: tar czf modules-${{ matrix.os }}.tgz *.deb .github tests/test-runner tests/ImageOS.txt
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: modules-${{ matrix.os }}
path: modules-${{ matrix.os }}.tgz
retention-days: 14
testings:
name: Testing
strategy:
fail-fast: false
matrix:
os: [20.04, 22.04]
needs: build
uses: ./.github/workflows/zfs-linux-tests.yml
with:
os: ${{ matrix.os }}
cleanup:
if: always()
name: Cleanup
runs-on: ubuntu-22.04
needs: testings
steps:
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
- name: Generating summary
run: |
tar xzf modules-22.04/modules-22.04.tgz .github tests
.github/workflows/scripts/generate-summary.sh
# up to 4 steps, each can have 1 MiB output (for debugging log files)
- name: Summary for errors #1
run: .github/workflows/scripts/generate-summary.sh 1
- name: Summary for errors #2
run: .github/workflows/scripts/generate-summary.sh 2
- name: Summary for errors #3
run: .github/workflows/scripts/generate-summary.sh 3
- name: Summary for errors #4
run: .github/workflows/scripts/generate-summary.sh 4
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: Summary Files
path: Summary/
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 93045ec3abe8..d64414e32225 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
-Version: 2.2.2
+Version: 2.2.3
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.6
+Linux-Maximum: 6.7
Linux-Minimum: 3.10
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 3fc9fd2a9d81..d81199765c69 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,9508 +1,9528 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
* Copyright (c) 2023, Klara Inc.
* Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <getopt.h>
#include <openssl/evp.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <sys/brt.h>
#include <sys/brt_impl.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
/* Some platforms require part of inode IDs to be remapped */
#ifdef __APPLE__
#define ZDB_MAP_OBJECT_ID(obj) INO_XNUTOZFS(obj, 2)
#else
#define ZDB_MAP_OBJECT_ID(obj) (obj)
#endif
static const char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint_t zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern boolean_t spa_mode_readable_spacemaps;
extern uint_t zfs_reconstruct_indirect_combinations_max;
extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
static uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
static zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREE's that haven't yet matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOC's without a matching FREE, accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svfbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (e.g. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
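(Editorial aside: a hedged walk-through of the svbr_refcnt lifecycle described in the comment above, using a made-up entry sequence; this sketch is not part of zdb.c.)
```c
/* refcnt-sketch.c -- assumed entry order FREE, FREE, ALLOC, ALLOC for
 * one block pointer, mirroring the counting rules described above. */
#include <assert.h>

int
main(void)
{
	unsigned svbr_refcnt;
	int tracked;

	tracked = 1;		/* first FREE: node created ... */
	svbr_refcnt = 1;	/* ... with refcount 1 */
	svbr_refcnt++;		/* second FREE of the same blkptr: 2 */
	svbr_refcnt--;		/* first matching ALLOC: 1 */
	svbr_refcnt--;		/* second matching ALLOC: 0 */
	if (svbr_refcnt == 0)
		tracked = 0;	/* all pairs matched: node is removed */
	assert(tracked == 0);
	return (0);
}
```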
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare, NULL,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
(void) args;
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb = {{{0}}};
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry as it may have been condensed
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly, double FREEs are allowed as well, but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
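(Editorial aside: a worked example of the Cross Check condition with made-up TXG numbers; it reuses the comparison applied by verify_livelist_allocs() earlier in this file but is not part of zdb.c.)
```c
/* crosscheck-sketch.c -- hypothetical values: the livelist recorded an
 * ALLOC at TXG 10, while the spacemap log shows the same DVA freed at
 * TXG 15. A free at or after the recorded allocation means the livelist
 * entry is stale, so it gets reported. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t svb_allocated_txg = 10;	/* from the livelist */
	uint64_t sme_txg = 15;			/* from the spacemap log */

	/* the same test verify_livelist_allocs() applies */
	if (svb_allocated_txg <= sme_txg)
		(void) printf("ERROR: Livelist ALLOC from TXG %llx "
		    "FREED at TXG %llx\n",
		    (unsigned long long)svb_allocated_txg,
		    (unsigned long long)sme_txg);
	return (0);
}
```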
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] [-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s -B [-e [-V] [-p <path> ...]] [-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>] <poolname>/<objset id> [<backupflags>]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>] [<poolname>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O [-K <key>] <dataset> <path>\n"
"\t%s -r [-K <key>] <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b --block-stats "
"block statistics\n");
(void) fprintf(stderr, " -B --backup "
"backup stream\n");
(void) fprintf(stderr, " -c --checksum "
"checksum all metadata (twice for all data) blocks\n");
(void) fprintf(stderr, " -C --config "
"config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d --datasets "
"dataset(s)\n");
(void) fprintf(stderr, " -D --dedup-stats "
"dedup statistics\n");
(void) fprintf(stderr, " -E --embedded-block-pointer=INTEGER\n"
" decode and display block "
"from an embedded block pointer\n");
(void) fprintf(stderr, " -h --history "
"pool history\n");
(void) fprintf(stderr, " -i --intent-logs "
"intent logs\n");
(void) fprintf(stderr, " -l --label "
"read label contents\n");
(void) fprintf(stderr, " -k --checkpointed-state "
"examine the checkpointed state of the pool\n");
(void) fprintf(stderr, " -L --disable-leak-tracking "
"disable leak tracking (do not load spacemaps)\n");
(void) fprintf(stderr, " -m --metaslabs "
"metaslabs\n");
(void) fprintf(stderr, " -M --metaslab-groups "
"metaslab groups\n");
(void) fprintf(stderr, " -O --object-lookups "
"perform object lookups by path\n");
(void) fprintf(stderr, " -r --copy-object "
"copy an object by path to file\n");
(void) fprintf(stderr, " -R --read-block "
"read and display block from a device\n");
(void) fprintf(stderr, " -s --io-stats "
"report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S --simulate-dedup "
"simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v --verbose "
"verbose (applies to all others)\n");
(void) fprintf(stderr, " -y --livelist "
"perform livelist and metaslab validation on any livelists being "
"deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A --ignore-assertions "
"ignore assertions (-A), enable panic recovery (-AA) or both "
"(-AAA)\n");
(void) fprintf(stderr, " -e --exported "
"pool is exported/destroyed/has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F --automatic-rewind "
"attempt automatic rewind within safe range of transaction "
"groups\n");
(void) fprintf(stderr, " -G --dump-debug-msg "
"dump zfs_dbgmsg buffer before exiting\n");
(void) fprintf(stderr, " -I --inflight=INTEGER "
"specify the maximum number of checksumming I/Os "
"[default is 200]\n");
(void) fprintf(stderr, " -K --key=KEY "
"decryption key for encrypted dataset\n");
(void) fprintf(stderr, " -o --option=\"OPTION=INTEGER\" "
"set global variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p --path==PATH "
"use one or more with -e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P --parseable "
"print numbers in parseable form\n");
(void) fprintf(stderr, " -q --skip-label "
"don't print label contents\n");
(void) fprintf(stderr, " -t --txg=INTEGER "
"highest txg to use when searching for uberblocks\n");
(void) fprintf(stderr, " -T --brt-stats "
"BRT statistics\n");
(void) fprintf(stderr, " -u --uberblock "
"uberblock\n");
(void) fprintf(stderr, " -U --cachefile=PATH "
"use alternate cachefile\n");
(void) fprintf(stderr, " -V --verbatim "
"do verbatim import\n");
(void) fprintf(stderr, " -x --dump-blocks=PATH "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X --extreme-rewind "
"attempt extreme rewind (does not work with dataset)\n");
(void) fprintf(stderr, " -Y --all-reconstruction "
"attempt all reconstruction combinations for split blocks\n");
(void) fprintf(stderr, " -Z --zstd-headers "
"show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) size;
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) size;
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static void
zdb_nicebytes(uint64_t bytes, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)bytes);
else
zfs_nicebytes(bytes, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] == 0)
continue;
if (histo[i] > max)
max = histo[i];
if (i > maxidx)
maxidx = i;
if (i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
if (data == NULL)
kmem_free(arr, oursize);
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
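/*
 * Print a bpobj's subobject array, omitting the trailing run of zero
 * entries.
 */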
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
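/*
 * Print the SA attribute registration ZAP: each attribute's raw packed
 * value and its decoded [length:bswap:number] triple.
 */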
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
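/*
 * The get_*_refcount() helpers below count the space maps (DTL,
 * metaslab, obsolete, checkpoint, and log) that should each hold a
 * reference on the SPACEMAP_HISTOGRAM feature;
 * verify_spacemap_refcounts() compares their sum with the feature's
 * actual refcount and reports any mismatch.
 */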
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
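/*
 * Print a space map.  The header (object, length, alloc) is always
 * shown; at -dddddd or -mmmm every entry is decoded as well: debug
 * entries (txg/sync-pass annotations or padding) and one- or two-word
 * ALLOC/FREE records with offset, run length and (for two-word
 * entries) vdev id.  The running allocated total is checked against
 * the space map summary at the end.
 */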
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
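/*
 * Print one metaslab: id, offset, spacemap object and free space.
 * With -mm the on-disk histogram is shown (when SPACEMAP_HISTOGRAM is
 * active); with -mmm the metaslab is loaded (unless -L) and its
 * in-memory stats are dumped as well.
 */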
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
 * The space map histogram represents free space in power-of-two
 * buckets scaled by sm_shift (i.e. bucket 0 counts 2^sm_shift-sized
 * chunks).
 */
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa, boolean_t show_special)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
metaslab_class_t *smc = spa_special_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || (mg->mg_class != mc &&
(!show_special || mg->mg_class != smc)))
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
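/*
 * Dump the metaslabs of every top-level vdev (along with its indirect
 * mapping, if any), or restrict output to the vdev and metaslab ids
 * given on the command line.
 */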
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
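/*
 * Print one DDT entry: every instantiated physical variant
 * (ditto/single/double/triple) with its refcount and reconstructed
 * block pointer.
 */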
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
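/*
 * Walk every checksum's DDT across all types and classes, then print
 * the aggregated histogram (at -DD) and the overall dedup ratio.
 */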
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
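/*
 * Dump Block Reference Table (BRT) state: pool-wide used/saved space
 * and clone ratio, per-vdev refcount/space summaries at -TT, and every
 * cloned DVA's refcount at -TTT.
 */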
static void
dump_brt(spa_t *spa)
{
if (!spa_feature_is_enabled(spa, SPA_FEATURE_BLOCK_CLONING)) {
printf("BRT: unsupported on this pool\n");
return;
}
if (!spa_feature_is_active(spa, SPA_FEATURE_BLOCK_CLONING)) {
printf("BRT: empty\n");
return;
}
brt_t *brt = spa->spa_brt;
VERIFY(brt);
char count[32], used[32], saved[32];
zdb_nicebytes(brt_get_used(spa), used, sizeof (used));
zdb_nicebytes(brt_get_saved(spa), saved, sizeof (saved));
uint64_t ratio = brt_get_ratio(spa);
printf("BRT: used %s; saved %s; ratio %llu.%02llux\n", used, saved,
(u_longlong_t)(ratio / 100), (u_longlong_t)(ratio % 100));
if (dump_opt['T'] < 2)
return;
for (uint64_t vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brt_vdev_t *brtvd = &brt->brt_vdevs[vdevid];
if (brtvd == NULL)
continue;
if (!brtvd->bv_initiated) {
printf("BRT: vdev %" PRIu64 ": empty\n", vdevid);
continue;
}
zdb_nicenum(brtvd->bv_totalcount, count, sizeof (count));
zdb_nicebytes(brtvd->bv_usedspace, used, sizeof (used));
zdb_nicebytes(brtvd->bv_savedspace, saved, sizeof (saved));
printf("BRT: vdev %" PRIu64 ": refcnt %s; used %s; saved %s\n",
vdevid, count, used, saved);
}
if (dump_opt['T'] < 3)
return;
char dva[64];
printf("\n%-16s %-10s\n", "DVA", "REFCNT");
for (uint64_t vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brt_vdev_t *brtvd = &brt->brt_vdevs[vdevid];
if (brtvd == NULL || !brtvd->bv_initiated)
continue;
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, brt->brt_mos, brtvd->bv_mos_entries);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t offset = *(uint64_t *)za.za_name;
uint64_t refcnt = za.za_first_integer;
snprintf(dva, sizeof (dva), "%" PRIu64 ":%llx", vdevid,
(u_longlong_t)offset);
printf("%-16s %-10llu\n", dva, (u_longlong_t)refcnt);
}
zap_cursor_fini(&zc);
}
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
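/*
 * Print a vdev's dirty time logs: each non-empty DTL range tree is
 * walked segment by segment (with the backing space map at -dddddd
 * for leaf vdevs), then children are visited recursively.
 */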
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
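/*
 * Print the pool history: command records, internal events, and ioctls
 * with their input/output nvlists; -hh additionally dumps the raw
 * nvlist of every record.
 */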
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
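/*
 * Convert a block id at the bookmark's indirection level into a byte
 * offset within the object.
 */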
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
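/*
 * Append the zstd header (compressed size, format version, level) of a
 * ZSTD-compressed block pointer to blkbuf.  Embedded BPs are decoded
 * in place; otherwise the block is read raw (decrypted but not
 * decompressed) into a buffer that is allocated once and reused across
 * calls.
 */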
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
- abd_t *pabd;
+ static abd_t *pabd = NULL;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
- pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
+ if (!pabd)
+ pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
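/*
 * Format a block pointer compactly: DVAs, logical/physical sizes, fill
 * count, birth txgs, checksum, and a FREE marker for freed BPs.  At
 * -bbbbbb the full snprintf_blkptr() form is used instead.
 */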
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" cksum=%016llx:%016llx:%016llx:%016llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
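/*
 * Recursively print an indirect block tree.  For each non-hole
 * indirect block the child block pointers are read via the ARC and
 * visited in turn, verifying that the children's fill counts sum to
 * the parent's fill.
 */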
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (int j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (nice) >= NN_NUMBUF_SZ, "nice truncated");
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
_Static_assert(sizeof (used) >= NN_NUMBUF_SZ, "used truncated");
_Static_assert(sizeof (compressed) >= NN_NUMBUF_SZ,
"compressed truncated");
_Static_assert(sizeof (uncompressed) >= NN_NUMBUF_SZ,
"uncompressed truncated");
_Static_assert(sizeof (unique) >= NN_NUMBUF_SZ, "unique truncated");
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
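/*
 * Print a bpobj summary line and recurse into its subobjects; at
 * -ddddd the top-level bpobj's block pointers are iterated as well.
 */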
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
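/*
 * Print one bookmark.  For redaction bookmarks, print_redact adds the
 * redaction progress and snapshot list, and print_list additionally
 * dumps every redaction entry.
 */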
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
int len;
dmu_objset_name(os, osname);
len = snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name);
VERIFY3S(len, <, ZFS_MAX_DATASET_NAME_LEN);
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
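/*
 * Record a bpobj and, recursively, its subobjects as referenced MOS
 * objects, so zdb can later detect leaked MOS objects.
 */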
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, const char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
_Static_assert(sizeof (entries) >= NN_NUMBUF_SZ, "entries truncated");
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) putchar('\n');
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
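/*
 * Cross-check a dsl_dir's livelist space accounting against the space
 * actually written since its origin snapshot; returns nonzero on a
 * discrepancy.
 */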
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static char *key_material = NULL;
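/*
 * Derive the wrapping key from the user-supplied key material: hex
 * keys are parsed directly, passphrases are run through PBKDF2 with
 * the dataset's stored salt and iteration count; any other keyformat
 * is fatal.
 */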
static boolean_t
zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
{
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
1, &keyformat));
switch (keyformat) {
case ZFS_KEYFORMAT_HEX:
for (i = 0; i < WRAPPING_KEY_LEN * 2; i += 2) {
if (!isxdigit(key_material[i]) ||
!isxdigit(key_material[i+1]))
return (B_FALSE);
if (sscanf(&key_material[i], "%02hhx", &c) != 1)
return (B_FALSE);
key_out[i / 2] = c;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
sizeof (uint64_t), 1, &salt));
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
sizeof (uint64_t), 1, &iters));
if (PKCS5_PBKDF2_HMAC_SHA1(key_material, strlen(key_material),
((uint8_t *)&salt), sizeof (uint64_t), iters,
WRAPPING_KEY_LEN, key_out) != 1)
return (B_FALSE);
break;
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
}
return (B_TRUE);
}
static char encroot[ZFS_MAX_DATASET_NAME_LEN];
static boolean_t key_loaded = B_FALSE;
static void
zdb_load_key(objset_t *os)
{
dsl_pool_t *dp;
dsl_dir_t *dd, *rdd;
uint8_t key[WRAPPING_KEY_LEN];
uint64_t rddobj;
int err;
dp = spa_get_dsl(os->os_spa);
dd = os->os_dsl_dataset->ds_dir;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, sizeof (uint64_t), 1, &rddobj));
VERIFY0(dsl_dir_hold_obj(dd->dd_pool, rddobj, NULL, FTAG, &rdd));
dsl_dir_name(rdd, encroot);
dsl_dir_rele(rdd, FTAG);
if (!zdb_derive_key(dd, key))
fatal("couldn't derive encryption key");
dsl_pool_config_exit(dp, FTAG);
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_UNAVAILABLE);
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args;
crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)key, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
NULL, crypto_args, &dcp));
err = spa_keystore_load_wkey(encroot, dcp, B_FALSE);
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err != 0)
fatal(
"couldn't load encryption key for %s: %s",
encroot, err == ZFS_ERR_CRYPTO_NOTSUP ?
"crypto params not supported" : strerror(err));
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_AVAILABLE);
printf("Unlocked encryption root: %s\n", encroot);
key_loaded = B_TRUE;
}
static void
zdb_unload_key(void)
{
if (!key_loaded)
return;
VERIFY0(spa_keystore_unload_wkey(encroot));
key_loaded = B_FALSE;
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
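/*
 * Hold an objset for dumping.  With -K the encryption key is loaded
 * first through a temporary hold, then the objset is re-held with
 * DS_HOLD_FLAG_DECRYPT; for ZPL objsets (when readable) the SA
 * attribute table is set up as well.
 */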
static int
open_objset(const char *path, const void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
if (dump_opt['K']) {
/* decryption requested, try to load keys */
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset "
"'%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
/* succeeds or dies */
zdb_load_key(*osp);
/* release it all */
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
}
int ds_hold_flags = key_loaded ? DS_HOLD_FLAG_DECRYPT : 0;
err = dmu_objset_hold_flags(path, ds_hold_flags, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS &&
(key_loaded || !(*osp)->os_encrypted)) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele_flags(dmu_objset_ds(*osp),
ds_hold_flags, tag);
*osp = NULL;
}
}
sa_os = *osp;
return (err);
}
static void
close_objset(objset_t *os, const void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele_flags(dmu_objset_ds(os),
key_loaded ? DS_HOLD_FLAG_DECRYPT : 0, tag);
sa_attr_table = NULL;
sa_os = NULL;
zdb_unload_key();
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
 * Print uid or gid information.
 * For a normal POSIX id, just the id is printed in decimal.
 * For CIFS files with a FUID, the fuid is printed in hex followed by
 * the domain-rid string.
 */
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
const char *domain =
zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
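/*
 * Print a znode's SA-packed xattrs, unpacking the nvlist and printing
 * printable values literally and everything else octal-escaped.
 */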
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
boolean_t can_print = !dump_opt['P'];
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (!isprint(value[idx])) {
can_print = B_FALSE;
break;
}
}
for (idx = 0; idx < cnt; ++idx) {
if (can_print)
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_symlink_size >= sizeof (linktarget)) {
(void) printf("symlink size %d is too large\n",
sa_symlink_size);
return;
}
linktarget[sa_symlink_size] = '\0';
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
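/*
 * Print a ZPL znode: owner, timestamps, mode, size, link count and
 * flags, plus the file's path at -ddddd and, where present, symlink
 * target, project id, xattr object, rdev and SA xattrs.
 */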
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
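/*
* Decide whether an object of the given type should be dumped, based on
* the ZOR_FLAG_* bits from an object range. Directories, plain files,
* and space maps each have a dedicated flag; any ZAP-typed object
* matches ZOR_FLAG_ZAP; all remaining types match only when every bit
* outside the supported flags is set (i.e. the all-types flag was used,
* possibly with negated flags).
*/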
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
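/*
* Print a one-line summary of an object (block sizes, sizes, fill, and
* type), plus bonus, dnode-flag, indirect-block, and segment details as
* -d verbosity increases. For encrypted datasets without a loaded key,
* the dnode is held directly since the bonus buffer may be encrypted.
*/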
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
_Static_assert(sizeof (iblk) >= NN_NUMBUF_SZ, "iblk truncated");
_Static_assert(sizeof (dblk) >= NN_NUMBUF_SZ, "dblk truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ, "lsize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ, "asize truncated");
_Static_assert(sizeof (bonus_size) >= NN_NUMBUF_SZ,
"bonus_size truncated");
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = B_FALSE;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (!key_loaded && os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
(void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 *
doi.doi_fill_count * doi.doi_data_block_size / (object == 0 ?
DNODES_PER_BLOCK : 1) / doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (key_loaded ||
(!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type))) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5) {
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
DN_SPILL_BLKPTR(dn->dn_phys), B_FALSE);
(void) printf("\nSpill block: %s\n", blkbuf);
}
dump_indirect(dn);
}
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (segsize) >= NN_NUMBUF_SZ,
"segsize truncated");
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *const objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
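* For example, "10:20:A-f" selects objects 10 through 20 of every type
* except plain files (flag letters are resolved through the flagbits
* table; see the A-f example in match_object_type() above).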
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, const char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
out:
free(dup);
return (rc);
}
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
_Static_assert(sizeof (numbuf) >= NN_NUMBUF_SZ, "numbuf truncated");
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, ctime(&timestamp));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
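/*
* Recursively walk an unpacked label nvlist, tallying each pair into
* the per-type stats nvlists and, for "children" arrays, recording
* leaf vdev counts and XDR-encoded sizes.
*/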
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
const char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference = 0;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
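/* Return the index of the first label containing this record, or -1. */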
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(const char *prefix, const cksum_record_t *rec)
{
fputs(prefix, stdout);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
putchar('\n');
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
uint64_t label_offset;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
boolean_t cksum_valid;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d %s\n", l,
label->cksum_valid ? "" : "(Bad label cksum)");
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(const l2arc_log_blkptr_t *lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps->lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps->lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps->lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE(lbps->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE(lbps->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS(lbps->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM(lbps->lbp_prop));
(void) printf("|\n\n");
}
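/*
* Walk the chain of L2ARC log blocks starting from the two block
* pointers in the device header, emulating l2arc_rebuild(): read each
* block, verify its fletcher-4 checksum, decompress if needed, and
* byteswap blocks written with the opposite endianness. The walk stops
* at the first invalid block or once it would wrap past the evict hand.
*/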
static void
dump_l2arc_log_blocks(int fd, const l2arc_dev_hdr_phys_t *l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr->dh_evict;
dev.l2ad_start = l2dhdr->dh_start;
dev.l2ad_end = l2dhdr->dh_end;
if (l2dhdr->dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(&lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
if (zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL) != 0) {
(void) printf("L2ARC block decompression "
"failed\n");
abd_free(abd);
goto out;
}
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(&lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr->dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
out:
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, &l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the header of the device may be less than what zdb
* reports by dump_l2arc_log_blocks() which emulates l2arc_rebuild().
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, if we are on a system
* with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize
* and dh_lb_count will be lower to begin with than what exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen, though: the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If this happens there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing the
* current component's obj and the remaining path until we find
* the obj for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
zfs_fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
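/*
* dmu_send output callback used by dump_backup() to stream the send
* stream to stdout.
*/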
static int
dump_backup_bytes(objset_t *os, void *buf, int len, void *arg)
{
const char *p = (const char *)buf;
ssize_t nwritten;
(void) os;
(void) arg;
/* Write the data out, handling short writes and signals. */
while ((nwritten = write(STDOUT_FILENO, p, len)) < len) {
if (nwritten < 0) {
if (errno == EINTR)
continue;
return (errno);
}
p += nwritten;
len -= nwritten;
}
return (0);
}
static void
dump_backup(const char *pool, uint64_t objset_id, const char *flagstr)
{
boolean_t embed = B_FALSE;
boolean_t large_block = B_FALSE;
boolean_t compress = B_FALSE;
boolean_t raw = B_FALSE;
const char *c;
for (c = flagstr; c != NULL && *c != '\0'; c++) {
switch (*c) {
case 'e':
embed = B_TRUE;
break;
case 'L':
large_block = B_TRUE;
break;
case 'c':
compress = B_TRUE;
break;
case 'w':
raw = B_TRUE;
break;
default:
fprintf(stderr, "dump_backup: invalid flag "
"'%c'\n", *c);
return;
}
}
if (isatty(STDOUT_FILENO)) {
fprintf(stderr, "dump_backup: stream cannot be written "
"to a terminal\n");
return;
}
offset_t off = 0;
dmu_send_outparams_t out = {
.dso_outfunc = dump_backup_bytes,
.dso_dryrun = B_FALSE,
};
int err = dmu_send_obj(pool, objset_id, /* fromsnap */0, embed,
large_block, compress, raw, /* saved */ B_FALSE, STDOUT_FILENO,
&off, &out);
if (err != 0) {
fprintf(stderr, "dump_backup: dmu_send_obj: %s\n",
strerror(err));
return;
}
}
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1)
return (errno);
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
(void) close(fd);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
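/*
* Verify the embedded checksum of a label's vdev_phys region. The last
* zio_eck_t in the region holds the expected checksum, which was
* computed with the block's device offset substituted as a verifier;
* recompute it the same way (byteswapping for foreign-endian labels)
* and compare.
*/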
static boolean_t
label_cksum_valid(vdev_label_t *label, uint64_t offset)
{
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
zio_cksum_t expected_cksum;
zio_cksum_t actual_cksum;
zio_cksum_t verifier;
zio_eck_t *eck;
int byteswap;
void *data = (char *)label + offsetof(vdev_label_t, vl_vdev_phys);
eck = (zio_eck_t *)((char *)(data) + VDEV_PHYS_SIZE) - 1;
offset += offsetof(vdev_label_t, vl_vdev_phys);
ZIO_SET_CHECKSUM(&verifier, offset, 0, 0, 0);
byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck->zec_cksum;
eck->zec_cksum = verifier;
abd_t *abd = abd_get_from_buf(data, VDEV_PHYS_SIZE);
ci->ci_func[byteswap](abd, VDEV_PHYS_SIZE, NULL, &actual_cksum);
abd_free(abd);
if (byteswap)
byteswap_uint64_array(&expected_cksum, sizeof (zio_cksum_t));
if (ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (B_TRUE);
return (B_FALSE);
}
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
/*
* Check if we were given an absolute path and use it as is.
* Otherwise, if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Verify label cksum
* 3. Unpack the configuration and insert in config tree.
* 4. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
label->label_offset = vdev_label_offset(psize, l, 0);
if (pread64(fd, &label->label, sizeof (label->label),
label->label_offset) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
label->cksum_valid = label_cksum_valid(&label->label,
label->label_offset);
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, read the header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if existent.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
static int
dump_one_objset(const char *dsname, void *arg)
{
(void) arg;
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_brt_entry {
dva_t zbre_dva;
uint64_t zbre_refcount;
avl_node_t zbre_node;
} zdb_brt_entry_t;
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_clone_asize;
uint64_t zcb_clone_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
avl_tree_t zcb_brt;
boolean_t zcb_brt_is_active;
} zdb_cb_t;
/* test if two DVA offsets from the same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
const char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine prints three histograms in fixed-width columns, one
* each for psize, lsize, and asize, showing the count, length, and
* cumulative length of blocks for every power-of-two block size from
* 512 bytes up to SPA_MAXBLOCKSIZE.
*
* All three histograms are listed side by side on a single line per
* block size.
*
* By default the table is printed in nicenum format (e.g. 123K), but
* if the '-P' parameter is specified then the full raw (parseable)
* numbers are printed instead.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer used to convert numbers into strings with
* zdb_nicenum(), so that either raw or human-readable output
* can be produced.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Set up the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
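/*
* Account for one block in the traversal statistics. Each block is
* tallied in four buckets: its own level and ZB_TOTAL, each crossed
* with its own type and ZDB_OT_TOTAL. Gang, ditto, embedded, size
* histogram, and clone (BRT) accounting follow, and finally the block
* is claimed (with DDT refcount handling for dedup blocks) unless leak
* detection is disabled with -L.
*/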
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (zcb->zcb_brt_is_active && brt_maybe_exists(zcb->zcb_spa, bp)) {
/*
* Cloned blocks are special. We need to count them, so we can
* later uncount them when reporting leaked space, and we must
* only claim them once.
*
* To do this, we keep our own in-memory BRT. For each block
* we haven't seen before, we look it up in the real BRT and
* if it's there, we note it and its refcount, then proceed as
* normal. If we see the block again, we count it as a clone
* and then give it no further consideration.
*/
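/*
 * Walk-through of the logic below: if the real BRT reports a
 * refcount of 2 for a block, the first sighting claims it
 * normally and caches the entry; the next two sightings are
 * counted as clones, decrementing the cached refcount until
 * the entry is freed at zero.
 */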
zdb_brt_entry_t zbre_search, *zbre;
avl_index_t where;
zbre_search.zbre_dva = bp->blk_dva[0];
zbre = avl_find(&zcb->zcb_brt, &zbre_search, &where);
if (zbre != NULL) {
zcb->zcb_clone_asize += BP_GET_ASIZE(bp);
zcb->zcb_clone_blocks++;
zbre->zbre_refcount--;
if (zbre->zbre_refcount == 0) {
avl_remove(&zcb->zcb_brt, zbre);
umem_free(zbre, sizeof (zdb_brt_entry_t));
}
return;
}
uint64_t crefcnt = brt_entry_get_refcount(zcb->zcb_spa, bp);
if (crefcnt > 0) {
zbre = umem_zalloc(sizeof (zdb_brt_entry_t),
UMEM_NOFAIL);
zbre->zbre_dva = bp->blk_dva[0];
zbre->zbre_refcount = crefcnt;
avl_insert(&zcb->zcb_brt, zbre, where);
}
}
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
uint64_t kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
uint64_t sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
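/*
 * Unit check: (now - zcb_start) / 1000 / 1000 is elapsed
 * milliseconds, so bytes per millisecond approximates KB/s
 * (off only by the 1000-vs-1024 factor); the two "1 +" terms
 * merely guard against division by zero right after startup.
 */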
/* make sure nicenum has enough space */
_Static_assert(sizeof (buf) >= NN_NUMBUF_SZ, "buf truncated");
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4"PRIu64"MB/s) "
"estimated time remaining: "
"%"PRIu64"hr %02"PRIu64"min %02"PRIu64"sec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset, (void) arg;
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb = {0};
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
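/*
 * Accounting sketch: an entry with total refcount 3
 * contributes 2x its asize here. The traversal will
 * count the block once per reference, and
 * dump_block_stats() subtracts zcb_dedup_asize so the
 * total matches the space maps, which charge the
 * block only once.
 */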
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab that it belongs to.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
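/*
 * For instance, if entries 0-2 fall inside metaslab 0, the call
 * for metaslab 0 consumes them and leaves *vim_idxp at 3, where
 * the call for metaslab 1 resumes.
 */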
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable's
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* On load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1ULL << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
obsolete_bytes += 1ULL << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
static int
zdb_brt_entry_compare(const void *zcn1, const void *zcn2)
{
const dva_t *dva1 = &((const zdb_brt_entry_t *)zcn1)->zbre_dva;
const dva_t *dva2 = &((const zdb_brt_entry_t *)zcn2)->zbre_dva;
int cmp;
cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (cmp == 0)
cmp = TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2));
return (cmp);
}
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t *zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
zcb = umem_zalloc(sizeof (zdb_cb_t), UMEM_NOFAIL);
if (spa_feature_is_active(spa, SPA_FEATURE_BLOCK_CLONING)) {
avl_create(&zcb->zcb_brt, zdb_brt_entry_compare,
sizeof (zdb_brt_entry_t),
offsetof(zdb_brt_entry_t, zbre_node));
zcb->zcb_brt_is_active = B_TRUE;
}
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
zdb_leak_init(spa, zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, zcb, NULL);
}
zdb_claim_removing(spa, zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
zcb, NULL));
}
deleted_livelists_count_blocks(spa, zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb->zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb->zcb_haderrors |= err;
if (zcb->zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb->zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb->zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, zcb);
tzb = &zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
total_found =
tzb->zb_asize - zcb->zcb_dedup_asize - zcb->zcb_clone_asize +
zcb->zcb_removing_size + zcb->zcb_checkpoint_size;
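/*
 * Reading the sum above: the traversal counts dedup and cloned
 * blocks once per reference, so the extra asize is subtracted,
 * while space claimed on a removing vdev and space pinned by a
 * checkpoint is allocated in the space maps but never visited
 * by the traversal, so it is added back.
 */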
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb->zcb_dedup_asize,
(u_longlong_t)zcb->zcb_dedup_blocks,
(double)zcb->zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu count: %6llu\n",
"bp cloned:", (u_longlong_t)zcb->zcb_clone_asize,
(u_longlong_t)zcb->zcb_clone_blocks);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb->zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb->zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb->zcb_embedded_histogram[i],
sizeof (zcb->zcb_embedded_histogram[i]) /
sizeof (zcb->zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_size(vim),
mem, sizeof (mem));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
zfs_blkstat_t *mdstats = umem_zalloc(sizeof (zfs_blkstat_t),
UMEM_NOFAIL);
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
const char *typename;
/* make sure nicenum has enough space */
_Static_assert(sizeof (csize) >= NN_NUMBUF_SZ,
"csize truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ,
"lsize truncated");
_Static_assert(sizeof (psize) >= NN_NUMBUF_SZ,
"psize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ,
"asize truncated");
_Static_assert(sizeof (avg) >= NN_NUMBUF_SZ,
"avg truncated");
_Static_assert(sizeof (gang) >= NN_NUMBUF_SZ,
"gang truncated");
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb->zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb->zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (level != ZB_TOTAL && t < DMU_OT_NUMTYPES &&
(level > 0 || DMU_OT_IS_METADATA(t))) {
mdstats->zb_count += zb->zb_count;
mdstats->zb_lsize += zb->zb_lsize;
mdstats->zb_psize += zb->zb_psize;
mdstats->zb_asize += zb->zb_asize;
mdstats->zb_gangs += zb->zb_gangs;
}
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb->zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
zdb_nicenum(mdstats->zb_count, csize,
sizeof (csize));
zdb_nicenum(mdstats->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(mdstats->zb_psize, psize,
sizeof (psize));
zdb_nicenum(mdstats->zb_asize, asize,
sizeof (asize));
zdb_nicenum(mdstats->zb_asize / mdstats->zb_count, avg,
sizeof (avg));
zdb_nicenum(mdstats->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)mdstats->zb_lsize / mdstats->zb_psize,
100.0 * mdstats->zb_asize / tzb->zb_asize);
(void) printf("%s\n", "Metadata Total");
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(zcb);
}
umem_free(mdstats, sizeof (zfs_blkstat_t));
}
(void) printf("\n");
if (leaks) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
if (zcb->zcb_haderrors) {
umem_free(zcb, sizeof (zdb_cb_t));
return (3);
}
umem_free(zcb, sizeof (zdb_cb_t));
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
(void) zilog, (void) dnp;
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter, else it attempts to infer the config from
* the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
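/*
 * Example: for a target of "tank/fs" (a hypothetical dataset
 * path), this imports the checkpointed state as
 * "tank_CHECKPOINTED_UNIVERSE" and, when new_path is non-NULL,
 * sets it to "tank_CHECKPOINTED_UNIVERSE/fs".
 */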
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1) {
if (target != poolname)
free(poolname);
return (NULL);
}
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
free(bogus_name);
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool, therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in current's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
(void) arg;
for (uint64_t i = start; i < start + size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_root_zap != 0)
mos_obj_refd(vd->vdev_root_zap);
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
static void
errorlog_count_refd(objset_t *mos, uint64_t errlog)
{
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, errlog);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
mos_obj_refd(za.za_first_integer);
}
zap_cursor_fini(&zc);
}
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
errorlog_count_refd(mos, spa->spa_errlog_last);
errorlog_count_refd(mos, spa->spa_errlog_scrub);
}
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
+ if (spa->spa_brt != NULL) {
+ brt_t *brt = spa->spa_brt;
+ for (uint64_t vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
+ brt_vdev_t *brtvd = &brt->brt_vdevs[vdevid];
+ if (brtvd != NULL && brtvd->bv_initiated) {
+ mos_obj_refd(brtvd->bv_mos_brtvdev);
+ mos_obj_refd(brtvd->bv_mos_entries);
+ }
+ }
+ }
+
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
VERIFY0(dmu_object_info(mos, object, &doi));
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
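The leak check above is a two-sided set comparison: every object the MOS metadata references is marked in mos_refd_objs, every allocated object is then swept against that set, and whatever survives on either side is reported. Below is a minimal sketch of the same mark-and-check idea using plain bit arrays instead of ZFS range trees; the object numbers and names are illustrative only, not zdb code.

#include <stdio.h>
#include <stdbool.h>

#define NOBJ 16

int
main(void)
{
	bool referenced[NOBJ] = { false };
	bool allocated[NOBJ] = { false };

	/* "Mark" phase: record every object the MOS metadata points at. */
	referenced[1] = referenced[2] = referenced[5] = true;

	/* Hypothetical on-disk state: object 5 was never allocated. */
	allocated[1] = allocated[2] = allocated[7] = true;

	int rv = 0;
	for (int obj = 0; obj < NOBJ; obj++) {
		if (allocated[obj] && !referenced[obj]) {
			/* Allocated but nothing points at it: a leak. */
			printf("MOS object %d leaked\n", obj);
			rv = 2;
		} else if (referenced[obj] && !allocated[obj]) {
			/* Referenced but missing: a dangling reference. */
			printf("MOS object %d referenced but not "
			    "allocated\n", obj);
			rv = 2;
		}
	}
	return (rv);
}

Coalescing single-object references into a range tree keeps the marked set compact even for pools with millions of MOS objects.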
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
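log_spacemap_obsolete_stats_cb() is a streaming group-by: counters accumulate per txg, are flushed whenever the txg changes, and the caller flushes the final group after iteration. Here is a self-contained sketch of that pattern, assuming entries arrive sorted by txg; the data below is made up.

#include <stdio.h>

/* Flush the running counters for one group ("txg"). */
static void
flush(unsigned long long key, unsigned long long valid,
    unsigned long long total)
{
	printf("%-8llu valid entries out of %-8llu - txg %llu\n",
	    valid, total, key);
}

int
main(void)
{
	/* (txg, is_valid) pairs standing in for log spacemap entries. */
	struct { unsigned long long txg; int valid; } entries[] = {
		{ 10, 1 }, { 10, 0 }, { 10, 1 }, { 12, 0 }, { 12, 1 },
	};
	unsigned long long cur = 0, valid = 0, total = 0;

	for (size_t i = 0; i < sizeof (entries) / sizeof (entries[0]); i++) {
		if (cur == 0) {
			cur = entries[i].txg;	/* first group */
		} else if (cur < entries[i].txg) {
			flush(cur, valid, total); /* group changed */
			valid = total = 0;
			cur = entries[i].txg;
		}
		total++;
		if (entries[i].valid)
			valid++;
	}
	flush(cur, valid, total);	/* final group needs explicit flush */
	return (0);
}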
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['T'])
dump_brt(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa, dump_opt['M'] > 1);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
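The numeric branch of zdb_vdev_lookup() is a simple recursive descent: parse one dotted component with strtoul(), index into the children, and recurse on the remainder. A stripped-down sketch of just that branch on a toy tree follows; the name-matching fallback is omitted and all types and labels here are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for vdev_t: just children and a label. */
typedef struct node {
	struct node **children;
	unsigned nchildren;
	const char *label;
} node_t;

/* Descend a dotted index path such as "0.1", as zdb_vdev_lookup() does. */
static node_t *
lookup(node_t *n, const char *path)
{
	char *end;
	unsigned long i = strtoul(path, &end, 10);

	if (end == path || (*end != '.' && *end != '\0'))
		return (NULL);		/* not a numeric component */
	if (n == NULL || i >= n->nchildren)
		return (NULL);
	n = n->children[i];
	return (*end == '\0' ? n : lookup(n, end + 1));
}

int
main(void)
{
	node_t leaf0 = { NULL, 0, "leaf0" }, leaf1 = { NULL, 0, "leaf1" };
	node_t *kids[] = { &leaf0, &leaf1 };
	node_t top = { kids, 2, "top" };
	node_t *tops[] = { &top };
	node_t root = { tops, 1, "root" };
	node_t *n = lookup(&root, "0.1");

	printf("%s\n", n != NULL ? n->label : "(not found)");	/* leaf1 */
	return (0);
}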
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
(void) buf;
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
+ /*
+ * Every gzip level shares the same decompressor, so there is
+ * no need to run it 9 times per brute-force attempt.
+ */
+ mask |= ZIO_COMPRESS_MASK(GZIP_2) | ZIO_COMPRESS_MASK(GZIP_3);
+ mask |= ZIO_COMPRESS_MASK(GZIP_4) | ZIO_COMPRESS_MASK(GZIP_5);
+ mask |= ZIO_COMPRESS_MASK(GZIP_6) | ZIO_COMPRESS_MASK(GZIP_7);
+ mask |= ZIO_COMPRESS_MASK(GZIP_8) | ZIO_COMPRESS_MASK(GZIP_9);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can specify the 'v'
* flag to see progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].\
ci_name);
}
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression filled exactly lsize bytes.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
memcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
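The double decompression into lbuf and a randomized lbuf2 is worth spelling out: if a decompressor writes fewer than lsize bytes, the untouched random tail of the second buffer almost certainly differs from the first, so a memcmp() over lsize catches short or nondeterministic output. A minimal sketch of the trick, with rand() standing in for random_get_pseudo_bytes() and a stub decompressor; illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stub decompressor that writes only the first "fill" output bytes. */
static int
stub_decompress(unsigned char *out, size_t outsize, size_t fill)
{
	memset(out, 0xab, fill < outsize ? fill : outsize);
	return (0);
}

int
main(void)
{
	unsigned char a[32] = { 0 }, b[32];
	size_t lsize = sizeof (a);

	/* Randomize the second buffer so a short write leaves a mismatch. */
	for (size_t i = 0; i < lsize; i++)
		b[i] = (unsigned char)rand();

	/* Simulate a decompressor that fills only 16 of the 32 bytes. */
	stub_decompress(a, lsize, 16);
	stub_decompress(b, lsize, 16);

	/* Mismatch is missed only if all 16 random tail bytes were zero. */
	if (memcmp(a, b, lsize) == 0)
		printf("decompressor filled all %zu bytes\n", lsize);
	else
		printf("short or nondeterministic output detected\n");
	return (0);
}

zdb accepts a candidate (algorithm, lsize) pair only when both outputs match over the full lsize.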
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *flagstr, *sizes, *tmp = NULL;
const char *vdev, *errmsg = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ?: "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ?: "");
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
errmsg = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
errmsg = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
errmsg = "offset must be a multiple of sector size";
if (errmsg) {
(void) printf("Invalid block specifier: %s - %s\n",
thing, errmsg);
goto done;
}
tmp = NULL;
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
goto done;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect an invalid block pointer. If invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY) == B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf(
"%12s\t"
"cksum=%016llx:%016llx:%016llx:%016llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
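The flag parsing above leans on flagbits[], a 256-entry table mapping each option character to its bit, with unknown characters reported and skipped. Below is a reduced sketch of that table-driven scan; the flag names and input string are made up, and the real code additionally parses a numeric blkptr offset after 'b'.

#include <stdio.h>

#define FLAG_CHECKSUM	0x01
#define FLAG_DECOMPRESS	0x02
#define FLAG_RAW	0x04

int
main(void)
{
	int flagbits[256] = { 0 };
	int flags = 0;

	/* One table cell per option character, as zdb_read_block() does. */
	flagbits['c'] = FLAG_CHECKSUM;
	flagbits['d'] = FLAG_DECOMPRESS;
	flagbits['r'] = FLAG_RAW;

	const char *flagstr = "dr";
	for (const char *p = flagstr; *p != '\0'; p++) {
		int bit = flagbits[(unsigned char)*p];
		if (bit == 0) {
			printf("***Ignoring flag: %c\n", *p);
			continue;
		}
		flags |= bit;
	}
	printf("flags = 0x%x\n", flags);	/* prints flags = 0x6 */
	return (0);
}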
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
/* check for valid hex or decimal numeric string */
static boolean_t
zdb_numeric(char *str)
{
int i = 0;
if (strlen(str) == 0)
return (B_FALSE);
if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
i = 2;
for (; i < strlen(str); i++) {
if (!isxdigit(str[i]))
return (B_FALSE);
}
return (B_TRUE);
}
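One subtlety worth noting: because every decimal digit is also a hex digit, the single isxdigit() scan accepts both decimal and 0x-prefixed hex strings. A quick standalone check mirroring the logic above (not part of zdb):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same rule as zdb_numeric(): optional 0x/0X prefix, then hex digits. */
static int
numeric(const char *str)
{
	size_t i = 0;

	if (strlen(str) == 0)
		return (0);
	if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
		i = 2;
	for (; i < strlen(str); i++) {
		if (!isxdigit((unsigned char)str[i]))
			return (0);
	}
	return (1);
}

int
main(void)
{
	/* Decimal passes because its digits are hex digits too. */
	printf("%d %d %d\n", numeric("100"), numeric("0x1a"),
	    numeric("tank"));	/* prints: 1 1 0 */
	return (0);
}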
int
main(int argc, char **argv)
{
int c;
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
dprintf_setup(&argc, argv);
/*
* If the SPA_CONFIG_PATH environment variable is set, it overrides
* the default spa_config_path setting. If the -U flag is specified,
* it in turn overrides the environment variable.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
struct option long_options[] = {
{"ignore-assertions", no_argument, NULL, 'A'},
{"block-stats", no_argument, NULL, 'b'},
{"backup", no_argument, NULL, 'B'},
{"checksum", no_argument, NULL, 'c'},
{"config", no_argument, NULL, 'C'},
{"datasets", no_argument, NULL, 'd'},
{"dedup-stats", no_argument, NULL, 'D'},
{"exported", no_argument, NULL, 'e'},
{"embedded-block-pointer", no_argument, NULL, 'E'},
{"automatic-rewind", no_argument, NULL, 'F'},
{"dump-debug-msg", no_argument, NULL, 'G'},
{"history", no_argument, NULL, 'h'},
{"intent-logs", no_argument, NULL, 'i'},
{"inflight", required_argument, NULL, 'I'},
{"checkpointed-state", no_argument, NULL, 'k'},
{"key", required_argument, NULL, 'K'},
{"label", no_argument, NULL, 'l'},
{"disable-leak-tracking", no_argument, NULL, 'L'},
{"metaslabs", no_argument, NULL, 'm'},
{"metaslab-groups", no_argument, NULL, 'M'},
{"numeric", no_argument, NULL, 'N'},
{"option", required_argument, NULL, 'o'},
{"object-lookups", no_argument, NULL, 'O'},
{"path", required_argument, NULL, 'p'},
{"parseable", no_argument, NULL, 'P'},
{"skip-label", no_argument, NULL, 'q'},
{"copy-object", no_argument, NULL, 'r'},
{"read-block", no_argument, NULL, 'R'},
{"io-stats", no_argument, NULL, 's'},
{"simulate-dedup", no_argument, NULL, 'S'},
{"txg", required_argument, NULL, 't'},
{"brt-stats", no_argument, NULL, 'T'},
{"uberblock", no_argument, NULL, 'u'},
{"cachefile", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"verbatim", no_argument, NULL, 'V'},
{"dump-blocks", required_argument, NULL, 'x'},
{"extreme-rewind", no_argument, NULL, 'X'},
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
{0, 0, 0, 0}
};
while ((c = getopt_long(argc, argv,
"AbBcCdDeEFGhiI:kK:lLmMNo:Op:PqrRsSt:TuU:vVx:XYyZ",
long_options, NULL)) != -1) {
switch (c) {
case 'b':
case 'B':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'N':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'T':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'K':
dump_opt[c]++;
key_material = strdup(optarg);
/* redact key material in process table */
while (*optarg != '\0') { *optarg++ = '*'; }
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = 256 * 1024 * 1024;
#endif
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
/*
* ZDB should have the ability to read spacemaps.
*/
spa_mode_readable_spacemaps = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("ABeEFkKlLNOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_set_assert_ok((dump_opt['A'] == 1) || (dump_opt['A'] > 2));
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
/* -N implies -d */
if (dump_opt['N'] && dump_opt['d'] == 0)
dump_opt['d'] = dump_opt['N'];
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
/*
* See if an objset ID was supplied (-d <pool>/<objset ID>).
* To disambiguate tank/100, treat the 100 as an objset ID
* if -N was given; otherwise 100 is an objset ID only if
* looking up tank/100 as a named dataset fails.
*/
objset_str = strchr(target, '/');
if (objset_str && strlen(objset_str) > 1 &&
zdb_numeric(objset_str + 1)) {
char *endptr;
errno = 0;
objset_str++;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
if (dump_opt['N'])
dataset_lookup = B_TRUE;
}
/* normal dataset name not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
exit(1);
}
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
libpc_handle_t lpch = {
.lpc_lib_handle = NULL,
.lpc_ops = &libzpool_config_ops,
.lpc_printerr = B_TRUE
};
error = zpool_find_config(&lpch, target_pool, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* We need to make sure to process the -O option, or call
* dump_path, only after the -e option has been processed,
* since -e imports the pool into the namespace if it's
* not in the cachefile.
*/
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
target_is_spa = B_FALSE;
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
if (error != 0)
fatal("internal error: %s", strerror(error));
}
/*
* import_checkpointed_state assumes that the target pool
* that we pass it is already part of the spa namespace.
* Because of that, we need to make sure to call it only
* after the -e option has been processed, which imports
* the pool into the namespace if it's not in the
* cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || dump_opt['B'] ||
objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
target_pool = strdup(target);
if (strpbrk(target, "/@") != NULL)
*strpbrk(target_pool, "/@") = '\0';
zdb_set_skip_mmp(target);
/*
* If -N was supplied, the user has indicated that
* zdb -d <pool>/<objsetID> is in effect. Otherwise
* we first assume that the dataset string is the
* dataset name. If dmu_objset_hold fails with the
* dataset string, and we have an objset_id, retry the
* lookup with the objsetID.
*/
boolean_t retry = B_TRUE;
retry_lookup:
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target_pool, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0) {
if (objset_id > 0 && retry) {
int err = dmu_objset_hold(target, FTAG,
&os);
if (err) {
dataset_lookup = B_TRUE;
retry = B_FALSE;
goto retry_lookup;
} else {
dmu_objset_rele(os, FTAG);
}
}
error = open_objset(target, FTAG, &os);
}
if (error == 0)
spa = dmu_objset_spa(os);
free(target_pool);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
const char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ?: "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (dump_opt['B']) {
dump_backup(target, objset_id,
argc > 0 ? argv[0] : NULL);
} else if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb_il.c b/sys/contrib/openzfs/cmd/zdb/zdb_il.c
index 970c45c9b3bb..63d95ddedc3b 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb_il.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb_il.c
@@ -1,485 +1,541 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
/*
* Print intent log header and statistics.
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/spa_impl.h>
#include <sys/abd.h>
#include "zdb.h"
extern uint8_t dump_opt[256];
static char tab_prefix[4] = "\t\t\t";
static void
print_log_bp(const blkptr_t *bp, const char *prefix)
{
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s%s\n", prefix, blkbuf);
}
static void
zil_prt_rec_create(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog;
const lr_create_t *lr = arg;
time_t crtime = lr->lr_crtime[0];
char *name, *link;
lr_attr_t *lrattr;
name = (char *)(lr + 1);
if (lr->lr_common.lrc_txtype == TX_CREATE_ATTR ||
lr->lr_common.lrc_txtype == TX_MKDIR_ATTR) {
lrattr = (lr_attr_t *)(lr + 1);
name += ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
}
if (txtype == TX_SYMLINK) {
link = name + strlen(name) + 1;
(void) printf("%s%s -> %s\n", tab_prefix, name, link);
} else if (txtype != TX_MKXATTR) {
(void) printf("%s%s\n", tab_prefix, name);
}
(void) printf("%s%s", tab_prefix, ctime(&crtime));
(void) printf("%sdoid %llu, foid %llu, slots %llu, mode %llo\n",
tab_prefix, (u_longlong_t)lr->lr_doid,
(u_longlong_t)LR_FOID_GET_OBJ(lr->lr_foid),
(u_longlong_t)LR_FOID_GET_SLOTS(lr->lr_foid),
(longlong_t)lr->lr_mode);
(void) printf("%suid %llu, gid %llu, gen %llu, rdev 0x%llx\n",
tab_prefix,
(u_longlong_t)lr->lr_uid, (u_longlong_t)lr->lr_gid,
(u_longlong_t)lr->lr_gen, (u_longlong_t)lr->lr_rdev);
}
static void
zil_prt_rec_remove(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_remove_t *lr = arg;
(void) printf("%sdoid %llu, name %s\n", tab_prefix,
(u_longlong_t)lr->lr_doid, (char *)(lr + 1));
}
static void
zil_prt_rec_link(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_link_t *lr = arg;
(void) printf("%sdoid %llu, link_obj %llu, name %s\n", tab_prefix,
(u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj,
(char *)(lr + 1));
}
static void
zil_prt_rec_rename(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_rename_t *lr = arg;
char *snm = (char *)(lr + 1);
char *tnm = snm + strlen(snm) + 1;
(void) printf("%ssdoid %llu, tdoid %llu\n", tab_prefix,
(u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid);
(void) printf("%ssrc %s tgt %s\n", tab_prefix, snm, tnm);
switch (txtype) {
case TX_RENAME_EXCHANGE:
(void) printf("%sflags RENAME_EXCHANGE\n", tab_prefix);
break;
case TX_RENAME_WHITEOUT:
(void) printf("%sflags RENAME_WHITEOUT\n", tab_prefix);
break;
}
}
static int
zil_prt_rec_write_cb(void *data, size_t len, void *unused)
{
(void) unused;
char *cdata = data;
for (size_t i = 0; i < len; i++) {
if (isprint(*cdata))
(void) printf("%c ", *cdata);
else
(void) printf("%2X", *cdata);
cdata++;
}
return (0);
}
static void
zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
{
const lr_write_t *lr = arg;
abd_t *data;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
int verbose = MAX(dump_opt['d'], dump_opt['i']);
int error;
(void) printf("%sfoid %llu, offset %llx, length %llx\n", tab_prefix,
(u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
(u_longlong_t)lr->lr_length);
- if (txtype == TX_WRITE2 || verbose < 5)
+ if (txtype == TX_WRITE2 || verbose < 4)
return;
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
(void) printf("%shas blkptr, %s\n", tab_prefix,
!BP_IS_HOLE(bp) &&
bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
"will claim" : "won't claim");
print_log_bp(bp, tab_prefix);
+ if (verbose < 5)
+ return;
if (BP_IS_HOLE(bp)) {
(void) printf("\t\t\tLSIZE 0x%llx\n",
(u_longlong_t)BP_GET_LSIZE(bp));
(void) printf("%s<hole>\n", tab_prefix);
return;
}
if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
(void) printf("%s<block already committed>\n",
tab_prefix);
return;
}
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os),
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
data = abd_alloc(BP_GET_LSIZE(bp), B_FALSE);
error = zio_wait(zio_read(NULL, zilog->zl_spa,
bp, data, BP_GET_LSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
if (error)
goto out;
} else {
+ if (verbose < 5)
+ return;
+
/* data is stored after the end of the lr_write record */
data = abd_alloc(lr->lr_length, B_FALSE);
abd_copy_from_buf(data, lr + 1, lr->lr_length);
}
(void) printf("%s", tab_prefix);
(void) abd_iterate_func(data,
0, MIN(lr->lr_length, (verbose < 6 ? 20 : SPA_MAXBLOCKSIZE)),
zil_prt_rec_write_cb, NULL);
(void) printf("\n");
out:
abd_free(data);
}
+static void
+zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg)
+{
+ (void) txtype;
+ const lr_write_t *lr = arg;
+ const blkptr_t *bp = &lr->lr_blkptr;
+ int verbose = MAX(dump_opt['d'], dump_opt['i']);
+
+ (void) printf("%s(encrypted)\n", tab_prefix);
+
+ if (verbose < 4)
+ return;
+
+ if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
+ (void) printf("%shas blkptr, %s\n", tab_prefix,
+ !BP_IS_HOLE(bp) &&
+ bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
+ "will claim" : "won't claim");
+ print_log_bp(bp, tab_prefix);
+ }
+}
+
static void
zil_prt_rec_truncate(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_truncate_t *lr = arg;
(void) printf("%sfoid %llu, offset 0x%llx, length 0x%llx\n", tab_prefix,
(u_longlong_t)lr->lr_foid, (longlong_t)lr->lr_offset,
(u_longlong_t)lr->lr_length);
}
static void
zil_prt_rec_setattr(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_setattr_t *lr = arg;
time_t atime = (time_t)lr->lr_atime[0];
time_t mtime = (time_t)lr->lr_mtime[0];
(void) printf("%sfoid %llu, mask 0x%llx\n", tab_prefix,
(u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_mask);
if (lr->lr_mask & AT_MODE) {
(void) printf("%sAT_MODE %llo\n", tab_prefix,
(longlong_t)lr->lr_mode);
}
if (lr->lr_mask & AT_UID) {
(void) printf("%sAT_UID %llu\n", tab_prefix,
(u_longlong_t)lr->lr_uid);
}
if (lr->lr_mask & AT_GID) {
(void) printf("%sAT_GID %llu\n", tab_prefix,
(u_longlong_t)lr->lr_gid);
}
if (lr->lr_mask & AT_SIZE) {
(void) printf("%sAT_SIZE %llu\n", tab_prefix,
(u_longlong_t)lr->lr_size);
}
if (lr->lr_mask & AT_ATIME) {
(void) printf("%sAT_ATIME %llu.%09llu %s", tab_prefix,
(u_longlong_t)lr->lr_atime[0],
(u_longlong_t)lr->lr_atime[1],
ctime(&atime));
}
if (lr->lr_mask & AT_MTIME) {
(void) printf("%sAT_MTIME %llu.%09llu %s", tab_prefix,
(u_longlong_t)lr->lr_mtime[0],
(u_longlong_t)lr->lr_mtime[1],
ctime(&mtime));
}
}
static void
zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_setsaxattr_t *lr = arg;
char *name = (char *)(lr + 1);
(void) printf("%sfoid %llu\n", tab_prefix,
(u_longlong_t)lr->lr_foid);
(void) printf("%sXAT_NAME %s\n", tab_prefix, name);
if (lr->lr_size == 0) {
(void) printf("%sXAT_VALUE NULL\n", tab_prefix);
} else {
(void) printf("%sXAT_VALUE ", tab_prefix);
char *val = name + (strlen(name) + 1);
for (int i = 0; i < lr->lr_size; i++) {
(void) printf("%c", *val);
val++;
}
}
}
static void
zil_prt_rec_acl(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_acl_t *lr = arg;
(void) printf("%sfoid %llu, aclcnt %llu\n", tab_prefix,
(u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_aclcnt);
}
static void
zil_prt_rec_clone_range(zilog_t *zilog, int txtype, const void *arg)
{
(void) zilog, (void) txtype;
const lr_clone_range_t *lr = arg;
+ int verbose = MAX(dump_opt['d'], dump_opt['i']);
(void) printf("%sfoid %llu, offset %llx, length %llx, blksize %llx\n",
tab_prefix, (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
(u_longlong_t)lr->lr_length, (u_longlong_t)lr->lr_blksz);
+ if (verbose < 4)
+ return;
+
+ for (unsigned int i = 0; i < lr->lr_nbps; i++) {
+ (void) printf("%s[%u/%llu] ", tab_prefix, i + 1,
+ (u_longlong_t)lr->lr_nbps);
+ print_log_bp(&lr->lr_bps[i], "");
+ }
+}
+
+static void
+zil_prt_rec_clone_range_enc(zilog_t *zilog, int txtype, const void *arg)
+{
+ (void) zilog, (void) txtype;
+ const lr_clone_range_t *lr = arg;
+ int verbose = MAX(dump_opt['d'], dump_opt['i']);
+
+ (void) printf("%s(encrypted)\n", tab_prefix);
+
+ if (verbose < 4)
+ return;
+
for (unsigned int i = 0; i < lr->lr_nbps; i++) {
(void) printf("%s[%u/%llu] ", tab_prefix, i + 1,
(u_longlong_t)lr->lr_nbps);
print_log_bp(&lr->lr_bps[i], "");
}
}
typedef void (*zil_prt_rec_func_t)(zilog_t *, int, const void *);
typedef struct zil_rec_info {
zil_prt_rec_func_t zri_print;
+ zil_prt_rec_func_t zri_print_enc;
const char *zri_name;
uint64_t zri_count;
} zil_rec_info_t;
static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = {
{.zri_print = NULL, .zri_name = "Total "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_CREATE "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_MKDIR "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_MKXATTR "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_SYMLINK "},
{.zri_print = zil_prt_rec_remove, .zri_name = "TX_REMOVE "},
{.zri_print = zil_prt_rec_remove, .zri_name = "TX_RMDIR "},
{.zri_print = zil_prt_rec_link, .zri_name = "TX_LINK "},
{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME "},
- {.zri_print = zil_prt_rec_write, .zri_name = "TX_WRITE "},
+ {.zri_print = zil_prt_rec_write,
+ .zri_print_enc = zil_prt_rec_write_enc,
+ .zri_name = "TX_WRITE "},
{.zri_print = zil_prt_rec_truncate, .zri_name = "TX_TRUNCATE "},
{.zri_print = zil_prt_rec_setattr, .zri_name = "TX_SETATTR "},
{.zri_print = zil_prt_rec_acl, .zri_name = "TX_ACL_V0 "},
{.zri_print = zil_prt_rec_acl, .zri_name = "TX_ACL_ACL "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_CREATE_ACL "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_CREATE_ATTR "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_CREATE_ACL_ATTR "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_MKDIR_ACL "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_MKDIR_ATTR "},
{.zri_print = zil_prt_rec_create, .zri_name = "TX_MKDIR_ACL_ATTR "},
{.zri_print = zil_prt_rec_write, .zri_name = "TX_WRITE2 "},
{.zri_print = zil_prt_rec_setsaxattr,
.zri_name = "TX_SETSAXATTR "},
{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME_EXCHANGE "},
{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME_WHITEOUT "},
{.zri_print = zil_prt_rec_clone_range,
+ .zri_print_enc = zil_prt_rec_clone_range_enc,
.zri_name = "TX_CLONE_RANGE "},
};
static int
print_log_record(zilog_t *zilog, const lr_t *lr, void *arg, uint64_t claim_txg)
{
(void) arg, (void) claim_txg;
int txtype;
int verbose = MAX(dump_opt['d'], dump_opt['i']);
/* reduce size of txtype to strip off TX_CI bit */
txtype = lr->lrc_txtype;
ASSERT(txtype != 0 && (uint_t)txtype < TX_MAX_TYPE);
ASSERT(lr->lrc_txg);
(void) printf("\t\t%s%s len %6llu, txg %llu, seq %llu\n",
(lr->lrc_txtype & TX_CI) ? "CI-" : "",
zil_rec_info[txtype].zri_name,
(u_longlong_t)lr->lrc_reclen,
(u_longlong_t)lr->lrc_txg,
(u_longlong_t)lr->lrc_seq);
if (txtype && verbose >= 3) {
if (!zilog->zl_os->os_encrypted) {
zil_rec_info[txtype].zri_print(zilog, txtype, lr);
+ } else if (zil_rec_info[txtype].zri_print_enc) {
+ zil_rec_info[txtype].zri_print_enc(zilog, txtype, lr);
} else {
(void) printf("%s(encrypted)\n", tab_prefix);
}
}
zil_rec_info[txtype].zri_count++;
zil_rec_info[0].zri_count++;
return (0);
}
static int
print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
(void) arg;
char blkbuf[BP_SPRINTF_LEN + 10];
int verbose = MAX(dump_opt['d'], dump_opt['i']);
const char *claim;
if (verbose <= 3)
return (0);
if (verbose >= 5) {
(void) strcpy(blkbuf, ", ");
snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), bp);
} else {
blkbuf[0] = '\0';
}
if (claim_txg != 0)
claim = "already claimed";
else if (bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa))
claim = "will claim";
else
claim = "won't claim";
(void) printf("\tBlock seqno %llu, %s%s\n",
(u_longlong_t)bp->blk_cksum.zc_word[ZIL_ZC_SEQ], claim, blkbuf);
return (0);
}
static void
print_log_stats(int verbose)
{
unsigned i, w, p10;
if (verbose > 3)
(void) printf("\n");
if (zil_rec_info[0].zri_count == 0)
return;
for (w = 1, p10 = 10; zil_rec_info[0].zri_count >= p10; p10 *= 10)
w++;
for (i = 0; i < TX_MAX_TYPE; i++)
if (zil_rec_info[i].zri_count || verbose >= 3)
(void) printf("\t\t%s %*llu\n",
zil_rec_info[i].zri_name, w,
(u_longlong_t)zil_rec_info[i].zri_count);
(void) printf("\n");
}
void
dump_intent_log(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
int verbose = MAX(dump_opt['d'], dump_opt['i']);
int i;
if (BP_IS_HOLE(&zh->zh_log) || verbose < 1)
return;
(void) printf("\n ZIL header: claim_txg %llu, "
"claim_blk_seq %llu, claim_lr_seq %llu",
(u_longlong_t)zh->zh_claim_txg,
(u_longlong_t)zh->zh_claim_blk_seq,
(u_longlong_t)zh->zh_claim_lr_seq);
(void) printf(" replay_seq %llu, flags 0x%llx\n",
(u_longlong_t)zh->zh_replay_seq, (u_longlong_t)zh->zh_flags);
for (i = 0; i < TX_MAX_TYPE; i++)
zil_rec_info[i].zri_count = 0;
/* see comment in zil_claim() or zil_check_log_chain() */
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return;
if (verbose >= 2) {
(void) printf("\n");
(void) zil_parse(zilog, print_log_block, print_log_record, NULL,
zh->zh_claim_txg, B_FALSE);
print_log_stats(verbose);
}
}
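The zri_print_enc addition turns zil_rec_info[] into a dispatch table with an optional per-type encrypted printer: print_log_record() uses the plaintext printer when the objset is unencrypted, the encrypted variant when one exists, and a generic "(encrypted)" line otherwise. A minimal sketch of that fallback dispatch; the types and printers below are illustrative.

#include <stdio.h>

typedef void (*print_func_t)(const void *);

typedef struct rec_info {
	print_func_t print;	/* plaintext printer */
	print_func_t print_enc;	/* optional encrypted-record printer */
} rec_info_t;

static void
print_write(const void *rec)
{
	(void) rec;
	printf("TX_WRITE: full plaintext dump\n");
}

static void
print_write_enc(const void *rec)
{
	(void) rec;
	printf("TX_WRITE: (encrypted) blkptr-only dump\n");
}

static void
dispatch(const rec_info_t *info, const void *rec, int encrypted)
{
	if (!encrypted)
		info->print(rec);
	else if (info->print_enc != NULL)
		info->print_enc(rec);	/* type-specific encrypted view */
	else
		printf("(encrypted)\n");	/* generic fallback */
}

int
main(void)
{
	rec_info_t write_rec = { print_write, print_write_enc };

	dispatch(&write_rec, NULL, 0);	/* plaintext path */
	dispatch(&write_rec, NULL, 1);	/* encrypted path */
	return (0);
}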
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
index 9636c99fc85f..69163b80bd5a 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
@@ -1,1366 +1,1370 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016, 2017, Intel Corporation.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2023, Klara Inc.
*/
/*
* ZFS syseventd module.
*
* file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
*
* The purpose of this module is to identify when devices are added to the
* system, and appropriately online or replace the affected vdevs.
*
* When a device is added to the system:
*
* 1. Search for any vdevs whose devid matches that of the newly added
* device.
*
* 2. If no vdevs are found, then search for any vdevs whose udev path
* matches that of the new device.
*
* 3. If no vdevs match by either method, then ignore the event.
*
* 4. Attempt to online the device with a flag to indicate that it should
* be unspared when resilvering completes. If this succeeds, then the
* same device was inserted and we should continue normally.
*
* 5. If the pool does not have the 'autoreplace' property set, attempt to
* online the device again without the unspare flag, which will
* generate a FMA fault.
*
* 6. If the pool has the 'autoreplace' property set, and the matching vdev
* is a whole disk, then label the new disk and attempt a 'zpool
* replace'.
*
* The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
* event indicates that a device failed to open during pool load, but the
* autoreplace property was set. In this case, we deferred the associated
* FMA fault until our module had a chance to process the autoreplace logic.
* If the device could not be replaced, then the second online attempt will
* trigger the FMA fault that we skipped earlier.
*
* On Linux udev provides a disk insert for both the disk and the partition.
*/
#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"
#define DEV_BYID_PATH "/dev/disk/by-id/"
#define DEV_BYPATH_PATH "/dev/disk/by-path/"
#define DEV_BYVDEV_PATH "/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list; /* list of unavailable pools at initialization */
list_t g_device_list; /* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
typedef struct unavailpool {
zpool_handle_t *uap_zhp;
list_node_t uap_node;
} unavailpool_t;
typedef struct pendingdev {
char pd_physpath[128];
list_node_t pd_node;
} pendingdev_t;
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
nvlist_t *nvroot;
vdev_stat_t *vs;
unsigned int c;
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
return (vs->vs_state);
}
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
unavailpool_t *uap;
uap = malloc(sizeof (unavailpool_t));
if (uap == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
uap->uap_zhp = zhp;
list_insert_tail((list_t *)data, uap);
} else {
zpool_close(zhp);
}
return (0);
}
/*
* Write an array of strings to the zed log
*/
static void lines_to_zed_log_msg(char **lines, int lines_cnt)
{
int i;
for (i = 0; i < lines_cnt; i++) {
zed_log_msg(LOG_INFO, "%s", lines[i]);
}
}
/*
* Two-stage replace on Linux:
* since we get disk notifications,
* we can wait for the partitioned disk slice to show up!
*
* First stage tags the disk, initiates async partitioning, and returns
* Second stage finds the tag and proceeds to ZFS labeling/replace
*
* disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
*
* 1. physical match with no fs, no partition
* tag it top, partition disk
*
* 2. physical match again, see partition and tag
*
*/
/*
* The device associated with the given vdev (either by devid or physical path)
* has been added to the system. If 'isdisk' is set, then we only attempt a
* replacement if it's a whole disk. This also implies that we should label the
* disk first.
*
* First, we attempt to online the device (making sure to undo any spare
* operation when finished). If this succeeds, then we're done. If it fails,
* and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
* but that the label was not what we expected. If the 'autoreplace' property
* is enabled, then we relabel the disk (if specified), and attempt a 'zpool
* replace'. If the online is successful, but the new state is something else
* (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
* race, and we should avoid attempting to relabel the disk.
*
* We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
*/
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
const char *path;
vdev_state_t newstate;
nvlist_t *nvroot, *newvd;
pendingdev_t *device;
uint64_t wholedisk = 0ULL;
uint64_t offline = 0ULL, faulted = 0ULL;
uint64_t guid = 0ULL;
uint64_t is_spare = 0;
const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
char rawpath[PATH_MAX], fullpath[PATH_MAX];
char pathbuf[PATH_MAX];
int ret;
int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
boolean_t is_sd = B_FALSE;
boolean_t is_mpath_wholedisk = B_FALSE;
uint_t c;
vdev_stat_t *vs;
char **lines = NULL;
int lines_cnt = 0;
/*
* Get the persistent path, typically under the '/dev/disk/by-id' or
* '/dev/disk/by-vdev' directories. Note that this path can change
* when a vdev is replaced with a new disk.
*/
if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
return;
/* Skip healthy disks */
verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (vs->vs_state == VDEV_STATE_HEALTHY) {
zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
__func__, path);
return;
}
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
+
+ update_vdev_config_dev_sysfs_path(vdev, path,
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&enc_sysfs_path);
+
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);
/*
* Special case:
*
* We've seen cases where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
* entry in its config. For example, on this force-faulted disk:
*
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
* the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
*/
if (physpath == NULL && path != NULL) {
/* If path begins with "/dev/disk/by-vdev/" ... */
if (strncmp(path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) == 0) {
/* Set physpath to the char after "/dev/disk/by-vdev" */
physpath = &path[strlen(DEV_BYVDEV_PATH)];
}
}
/*
* We don't want to autoreplace offlined disks. However, we do want to
* replace force-faulted disks (`zpool offline -f`). Force-faulted
* disks have both offline=1 and faulted=1 in the nvlist.
*/
if (offline && !faulted) {
zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
__func__, path);
return;
}
is_mpath_wholedisk = is_mpath_whole_disk(path);
zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
" %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
"(guid %llu)",
zpool_get_name(zhp), path,
physpath ? physpath : "NULL",
wholedisk ? "is" : "not",
is_mpath_wholedisk ? "is" : "not",
labeled ? "is" : "not",
enc_sysfs_path,
(long long unsigned int)guid);
/*
* The VDEV guid is preferred for identification (gets passed in path)
*/
if (guid != 0) {
(void) snprintf(fullpath, sizeof (fullpath), "%llu",
(long long unsigned int)guid);
} else {
/*
* otherwise use path sans partition suffix for whole disks
*/
(void) strlcpy(fullpath, path, sizeof (fullpath));
if (wholedisk) {
char *spath = zfs_strip_partition(fullpath);
if (!spath) {
zed_log_msg(LOG_INFO, "%s: Can't alloc",
__func__);
return;
}
(void) strlcpy(fullpath, spath, sizeof (fullpath));
free(spath);
}
}
if (is_spare)
online_flag |= ZFS_ONLINE_SPARE;
/*
* Attempt to online the device.
*/
if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
(newstate == VDEV_STATE_HEALTHY ||
newstate == VDEV_STATE_DEGRADED)) {
zed_log_msg(LOG_INFO,
" zpool_vdev_online: vdev '%s' ('%s') is "
"%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
"HEALTHY" : "DEGRADED");
return;
}
/*
* vdev_id alias rule for using scsi_debug devices (FMA automated
* testing)
*/
if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
is_sd = B_TRUE;
/*
* If the pool doesn't have the autoreplace property set, then use
* vdev online to trigger a FMA fault by posting an ereport.
*/
if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
!(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
"not a blank disk for '%s' ('%s')", fullpath,
physpath);
return;
}
/*
* Convert physical path into its current device node. Rawpath
* needs to be /dev/disk/by-vdev for a scsi_debug device since
* /dev/disk/by-path will not be present.
*/
(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
if (realpath(rawpath, pathbuf) == NULL && !is_mpath_wholedisk) {
zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
rawpath, strerror(errno));
int err = zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s) "
"err %d, new state %d",
fullpath, libzfs_error_description(g_zfshdl), err,
err ? (int)newstate : 0);
return;
}
/* Only autoreplace bad disks */
if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
(vs->vs_state != VDEV_STATE_FAULTED) &&
(vs->vs_state != VDEV_STATE_REMOVED) &&
(vs->vs_state != VDEV_STATE_CANT_OPEN)) {
zed_log_msg(LOG_INFO, " not autoreplacing since disk isn't in "
"a bad state (currently %llu)", vs->vs_state);
return;
}
nvlist_lookup_string(vdev, "new_devid", &new_devid);
if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
zed_log_msg(LOG_INFO,
" it's a multipath wholedisk, don't label");
if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
&lines_cnt) != 0) {
zed_log_msg(LOG_INFO,
" zpool_prepare_disk: could not "
"prepare '%s' (%s)", fullpath,
libzfs_error_description(g_zfshdl));
if (lines_cnt > 0) {
zed_log_msg(LOG_INFO,
" zfs_prepare_disk output:");
lines_to_zed_log_msg(lines, lines_cnt);
}
libzfs_free_str_array(lines, lines_cnt);
return;
}
} else if (!labeled) {
/*
* we're auto-replacing a raw disk, so label it first
*/
char *leafname;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label. Before we can label the disk, we need
* to map the physical string that was matched on to the under
* lying device node.
*
* If any part of this process fails, then do a force online
* to trigger a ZFS fault for the device (and any hot spare
* replacement).
*/
leafname = strrchr(pathbuf, '/') + 1;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
vdev, "autoreplace", &lines, &lines_cnt) != 0) {
zed_log_msg(LOG_WARNING,
" zpool_prepare_and_label_disk: could not "
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
if (lines_cnt > 0) {
zed_log_msg(LOG_INFO,
" zfs_prepare_disk output:");
lines_to_zed_log_msg(lines, lines_cnt);
}
libzfs_free_str_array(lines, lines_cnt);
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
/*
* The disk labeling is asynchronous on Linux. Just record
* this label request and return as there will be another
* disk add event for the partition after the labeling is
* completed.
*/
device = malloc(sizeof (pendingdev_t));
if (device == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
(void) strlcpy(device->pd_physpath, physpath,
sizeof (device->pd_physpath));
list_insert_tail(&g_device_list, device);
zed_log_msg(LOG_NOTICE, " zpool_label_disk: async '%s' (%llu)",
leafname, (u_longlong_t)guid);
return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
} else /* labeled */ {
boolean_t found = B_FALSE;
/*
* match up with request above to label the disk
*/
for (device = list_head(&g_device_list); device != NULL;
device = list_next(&g_device_list, device)) {
if (strcmp(physpath, device->pd_physpath) == 0) {
list_remove(&g_device_list, device);
free(device);
found = B_TRUE;
break;
}
zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
physpath, device->pd_physpath);
}
if (!found) {
/* unexpected partition slice encountered */
zed_log_msg(LOG_WARNING, "labeled disk %s was "
"unexpected here", fullpath);
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
physpath, (u_longlong_t)guid);
/*
* Paths that begin with '/dev/disk/by-id/' will change and so
* they must be updated before calling zpool_vdev_attach().
*/
if (strncmp(path, DEV_BYID_PATH, strlen(DEV_BYID_PATH)) == 0) {
(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
DEV_BYID_PATH, new_devid);
zed_log_msg(LOG_INFO, " zpool_label_disk: path '%s' "
"replaced by '%s'", path, pathbuf);
path = pathbuf;
}
}
libzfs_free_str_array(lines, lines_cnt);
/*
* Construct the root vdev to pass to zpool_vdev_attach(). While adding
* the entire vdev structure is harmless, we construct a reduced set of
* path/physpath/wholedisk to keep it simple.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
return;
}
if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
nvlist_free(nvroot);
return;
}
if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
(physpath != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
(enc_sysfs_path != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)&newvd, 1) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
nvlist_free(newvd);
nvlist_free(nvroot);
return;
}
nvlist_free(newvd);
/*
* Wait for udev to verify the links exist, then auto-replace
* the leaf disk at same physical location.
*/
if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: pool '%s', after labeling "
"replacement disk, the expected disk partition link '%s' "
"is missing after waiting %u ms",
zpool_get_name(zhp), path, DISK_LABEL_WAIT);
nvlist_free(nvroot);
return;
}
/*
* Prefer sequential resilvering when supported (mirrors and dRAID),
* otherwise fallback to a traditional healing resilver.
*/
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
if (ret != 0) {
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
B_TRUE, B_FALSE);
}
zed_log_msg(LOG_WARNING, " zpool_vdev_replace: %s with %s (%s)",
fullpath, path, (ret == 0) ? "no errors" :
libzfs_error_description(g_zfshdl));
nvlist_free(nvroot);
}
/*
* Utility functions to find a vdev matching given criteria.
*/
typedef struct dev_data {
const char *dd_compare;
const char *dd_prop;
zfs_process_func_t dd_func;
boolean_t dd_found;
boolean_t dd_islabeled;
uint64_t dd_pool_guid;
uint64_t dd_vdev_guid;
uint64_t dd_new_vdev_guid;
const char *dd_new_devid;
uint64_t dd_num_spares;
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
dev_data_t *dp = data;
const char *path = NULL;
uint_t c, children;
nvlist_t **child;
uint64_t guid = 0;
uint64_t isspare = 0;
/*
* First iterate over any children.
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/*
* Iterate over any spares and cache devices
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/* once a vdev was matched and processed there is nothing left to do */
if (dp->dd_found && dp->dd_num_spares == 0)
return;
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
/*
* Match by GUID if available otherwise fallback to devid or physical
*/
if (dp->dd_vdev_guid != 0) {
if (guid != dp->dd_vdev_guid)
return;
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
dp->dd_found = B_TRUE;
} else if (dp->dd_compare != NULL) {
/*
* NOTE: On Linux there is an event for partition, so unlike
* illumos, substring matching is not required to accommodate
* the partition suffix. An exact match will be present in
* the dp->dd_compare value.
* If the attached disk already contains a vdev GUID, it means
* the disk is not clean. In such a scenario, the physical path
* would be a match that makes the disk faulted when trying to
* online it. So, we would only want to proceed if either GUID
* matches with the last attached disk or the disk is in clean
* state.
*/
if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
strcmp(dp->dd_compare, path) != 0) {
return;
}
if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
zed_log_msg(LOG_INFO, " %s: no match (GUID:%llu"
" != vdev GUID:%llu)", __func__,
dp->dd_new_vdev_guid, guid);
return;
}
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
dp->dd_prop, path);
dp->dd_found = B_TRUE;
/* pass the new devid for use by auto-replacing code */
if (dp->dd_new_devid != NULL) {
(void) nvlist_add_string(nvl, "new_devid",
dp->dd_new_devid);
}
}
if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
dp->dd_num_spares++;
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
static void
zfs_enable_ds(void *arg)
{
unavailpool_t *pool = (unavailpool_t *)arg;
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
zpool_close(pool->uap_zhp);
free(pool);
}
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
nvlist_t *config, *nvl;
dev_data_t *dp = data;
uint64_t pool_guid;
unavailpool_t *pool;
zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
/*
* For each vdev in this pool, look for a match to apply dd_func
*/
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
if (dp->dd_pool_guid == 0 ||
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
(void) nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvl);
zfs_iter_vdev(zhp, nvl, data);
}
} else {
zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
}
/*
* if this pool was originally unavailable,
* then enable its datasets asynchronously
*/
if (g_enumeration_done) {
for (pool = list_head(&g_pool_list); pool != NULL;
pool = list_next(&g_pool_list, pool)) {
if (strcmp(zpool_get_name(zhp),
zpool_get_name(pool->uap_zhp)))
continue;
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
list_remove(&g_pool_list, pool);
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
pool);
break;
}
}
}
zpool_close(zhp);
/* cease iteration after a match */
return (dp->dd_found && dp->dd_num_spares == 0);
}
/*
* Given a physical device location, iterate over all
* (pool, vdev) pairs which correspond to that location.
*/
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
boolean_t is_slice, uint64_t new_vdev_guid)
{
dev_data_t data = { 0 };
data.dd_compare = physical;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid; /* used by auto replace code */
data.dd_new_vdev_guid = new_vdev_guid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching by-vdev
* path. Normally we shouldn't need this as the comparison would be
* made earlier in the devphys_iter(). For example, if we were replacing
* /dev/disk/by-vdev/L28, normally devphys_iter() would match the
* ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
* of the new disk config. However, we've seen cases where
* ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
* an example of a real 2-disk mirror pool where one disk was force
* faulted:
*
* com.delphix:vdev_zap_top: 129
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* So in the case above, the only thing we could compare is the path.
*
* We can do this because we assume by-vdev paths are authoritative as physical
* paths. We could not assume this for normal paths like /dev/sda since the
* physical location /dev/sda points to could change over time.
*/
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = by_vdev_path;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) != 0) {
/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
return (B_FALSE);
}
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching devid.
* On Linux we can match devid directly which is always a whole disk.
*/
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = devid;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_DEVID;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device guid, find any vdevs with a matching guid.
*/
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_func = func;
data.dd_found = B_FALSE;
data.dd_pool_guid = pool_guid;
data.dd_vdev_guid = vdev_guid;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Handle a EC_DEV_ADD.ESC_DISK event.
*
* illumos
* Expects: DEV_PHYS_PATH string in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/dsk/c0t1d0s0' (persistent)
* devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
* phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
*
* linux
* provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/sdc1' (not persistent)
* devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
* phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
*/
static int
zfs_deliver_add(nvlist_t *nvl)
{
const char *devpath = NULL, *devid = NULL;
uint64_t pool_guid = 0, vdev_guid = 0;
boolean_t is_slice;
/*
* Expecting a devid string and an optional physical location and guid
*/
if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
return (-1);
}
(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
devid, devpath ? devpath : "NULL", is_slice);
/*
* Iterate over all vdevs looking for a match in the following order:
* 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
* 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
* 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
* 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
* by-vdev paths represent physical paths).
*/
if (devid_iter(devid, zfs_process_add, is_slice))
return (0);
if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
is_slice, vdev_guid))
return (0);
if (vdev_guid != 0)
(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
is_slice);
if (devpath != NULL) {
/* Can we match a /dev/disk/by-vdev/ path? */
char by_vdev_path[MAXPATHLEN];
snprintf(by_vdev_path, sizeof (by_vdev_path),
"/dev/disk/by-vdev/%s", devpath);
if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
is_slice))
return (0);
}
return (0);
}
/*
* Called when we receive a VDEV_CHECK event, which indicates a device could not
* be opened during initial pool open, but the autoreplace property was set on
* the pool. In this case, we treat it as if it were an add event.
*/
static int
zfs_deliver_check(nvlist_t *nvl)
{
dev_data_t data = { 0 };
if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
&data.dd_pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
&data.dd_vdev_guid) != 0 ||
data.dd_vdev_guid == 0)
return (0);
zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
data.dd_pool_guid, data.dd_vdev_guid);
data.dd_func = zfs_process_add;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (0);
}
/*
* Given a path to a vdev, lookup the vdev's physical size from its
* config nvlist.
*
* Returns the vdev's physical size in bytes on success, 0 on error.
*/
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
vdev_stat_t *vs = NULL;
uint_t c;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (!vs) {
zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
vdev_path);
return (0);
}
return (vs->vs_pspace);
}
/*
* Given a path to a vdev, lookup if the vdev is a "whole disk" in the
* config nvlist. "whole disk" means that ZFS was passed a whole disk
* at pool creation time, which it partitioned up and has full control over.
* Thus a partition with wholedisk=1 set tells us that zfs created the
* partition at creation time. A partition without wholedisk set would have
* been created externally (e.g., with fdisk) and passed to ZFS.
*
* Returns the whole disk value (either 0 or 1).
*/
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
uint64_t wholedisk = 0;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
return (wholedisk);
}
/*
* If the device size grew by at least ~1% (measured against the new
* size), then return true.
*/
#define DEVICE_GREW(oldsize, newsize) \
((newsize > oldsize) && \
((newsize / (newsize - oldsize)) <= 100))
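/*
* A worked sketch of the check (sizes in bytes assumed): growth is
* measured against the new size, so with integer math the test fires
* once the increase reaches roughly 1%:
*
* oldsize = 100 GiB, newsize = 100 GiB + 512 MiB
* -> newsize / (newsize - oldsize) == 201: no autoexpand
* oldsize = 100 GiB, newsize = 110 GiB
* -> newsize / (newsize - oldsize) == 11: DEVICE_GREW is true
*/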
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
boolean_t avail_spare, l2cache;
nvlist_t *udev_nvl = data;
nvlist_t *tgt;
int error;
const char *tmp_devname;
char devname[MAXPATHLEN] = "";
uint64_t guid;
if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(devname, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
&tmp_devname) == 0) {
strlcpy(devname, tmp_devname, MAXPATHLEN);
zfs_append_partition(devname, MAXPATHLEN);
} else {
zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
}
zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
devname, zpool_get_name(zhp));
if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
&avail_spare, &l2cache, NULL)) != NULL) {
const char *path;
char fullpath[MAXPATHLEN];
uint64_t wholedisk = 0;
error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
if (error) {
zpool_close(zhp);
return (0);
}
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (wholedisk) {
char *tmp;
path = strrchr(path, '/');
if (path != NULL) {
tmp = zfs_strip_partition(path + 1);
if (tmp == NULL) {
zpool_close(zhp);
return (0);
}
} else {
zpool_close(zhp);
return (0);
}
(void) strlcpy(fullpath, tmp, sizeof (fullpath));
free(tmp);
/*
* We need to reopen the pool associated with this
* device so that the kernel can update the size of
* the expanded device. When expanding there is no
* need to restart the scrub from the beginning.
*/
boolean_t scrub_restart = B_FALSE;
(void) zpool_reopen_one(zhp, &scrub_restart);
} else {
(void) strlcpy(fullpath, path, sizeof (fullpath));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
vdev_state_t newstate;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
/*
* If this disk size has not changed, then
* there's no need to do an autoexpand. To
* check we look at the disk's size in its
* config, and compare it to the disk size
* that udev is reporting.
*/
uint64_t udev_size = 0, conf_size = 0,
wholedisk = 0, udev_parent_size = 0;
/*
* Get the size of our disk that udev is
* reporting.
*/
if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
&udev_size) != 0) {
udev_size = 0;
}
/*
* Get the size of our disk's parent device
* from udev (where sda1's parent is sda).
*/
if (nvlist_lookup_uint64(udev_nvl,
DEV_PARENT_SIZE, &udev_parent_size) != 0) {
udev_parent_size = 0;
}
conf_size = vdev_size_from_config(zhp,
fullpath);
wholedisk = vdev_whole_disk_from_config(zhp,
fullpath);
/*
* Only attempt an autoexpand if the vdev size
* changed. There are two different cases
* to consider.
*
* 1. wholedisk=1
* If you do a 'zpool create' on a whole disk
* (like /dev/sda), then zfs will create
* partitions on the disk (like /dev/sda1). In
* that case, wholedisk=1 will be set in the
* partition's nvlist config. So zed will need
* to see if your parent device (/dev/sda)
* expanded in size, and if so, then attempt
* the autoexpand.
*
* 2. wholedisk=0
* If you do a 'zpool create' on an existing
* partition, or a device that doesn't allow
* partitions, then wholedisk=0, and you will
* simply need to check if the device itself
* expanded in size.
*/
if (DEVICE_GREW(conf_size, udev_size) ||
(wholedisk && DEVICE_GREW(conf_size,
udev_parent_size))) {
error = zpool_vdev_online(zhp, fullpath,
0, &newstate);
zed_log_msg(LOG_INFO,
"%s: autoexpanding '%s' from %llu"
" to %llu bytes in pool '%s': %d",
__func__, fullpath, conf_size,
MAX(udev_size, udev_parent_size),
zpool_get_name(zhp), error);
}
}
}
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* This function handles the ESC_DEV_DLE device change event. Use the
* provided vdev guid when looking up a disk or partition; when the guid
* is not present, assume the entire disk is owned by ZFS, append the
* expected -part1 partition information, and then look up by physical path.
*/
static int
zfs_deliver_dle(nvlist_t *nvl)
{
const char *devname;
char name[MAXPATHLEN];
uint64_t guid;
if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(name, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
strlcpy(name, devname, MAXPATHLEN);
zfs_append_partition(name, MAXPATHLEN);
} else {
sprintf(name, "unknown");
zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
}
if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
"found", name);
return (1);
}
return (0);
}
/*
* syseventd daemon module event handler
*
* Handles syseventd daemon zfs device related events:
*
* EC_DEV_ADD.ESC_DISK
* EC_DEV_STATUS.ESC_DEV_DLE
* EC_ZFS.ESC_ZFS_VDEV_CHECK
*
* Note: assumes only one thread active at a time (not thread safe)
*/
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
int ret;
boolean_t is_check = B_FALSE, is_dle = B_FALSE;
if (strcmp(class, EC_DEV_ADD) == 0) {
/*
* We're mainly interested in disk additions, but we also listen
* for new loop devices, to allow for simplified testing.
*/
if (strcmp(subclass, ESC_DISK) != 0 &&
strcmp(subclass, ESC_LOFI) != 0)
return (0);
is_check = B_FALSE;
} else if (strcmp(class, EC_ZFS) == 0 &&
strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
/*
* This event signifies that a device failed to open
* during pool load, but the 'autoreplace' property was
* set, so we should pretend it's just been added.
*/
is_check = B_TRUE;
} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
strcmp(subclass, ESC_DEV_DLE) == 0) {
is_dle = B_TRUE;
} else {
return (0);
}
if (is_dle)
ret = zfs_deliver_dle(nvl);
else if (is_check)
ret = zfs_deliver_check(nvl);
else
ret = zfs_deliver_add(nvl);
return (ret);
}
static void *
zfs_enum_pools(void *arg)
{
(void) arg;
(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
/*
* Linux - instead of using a thread pool, each list entry
* will spawn a thread when an unavailable pool transitions
* to available. zfs_slm_fini will wait for these threads.
*/
g_enumeration_done = B_TRUE;
return (NULL);
}
/*
* Called from the zed daemon at startup.
*
* Messages are delivered from zevents or the udev monitor.
*
* For now, each agent has its own libzfs instance
*/
int
zfs_slm_init(void)
{
if ((g_zfshdl = libzfs_init()) == NULL)
return (-1);
/*
* collect a list of unavailable pools (asynchronously,
* since this can take a while)
*/
list_create(&g_pool_list, sizeof (struct unavailpool),
offsetof(struct unavailpool, uap_node));
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
list_destroy(&g_pool_list);
libzfs_fini(g_zfshdl);
return (-1);
}
pthread_setname_np(g_zfs_tid, "enum-pools");
list_create(&g_device_list, sizeof (struct pendingdev),
offsetof(struct pendingdev, pd_node));
return (0);
}
void
zfs_slm_fini(void)
{
unavailpool_t *pool;
pendingdev_t *device;
/* wait for zfs_enum_pools thread to complete */
(void) pthread_join(g_zfs_tid, NULL);
/* destroy the thread pool */
if (g_tpool != NULL) {
tpool_wait(g_tpool);
tpool_destroy(g_tpool);
}
while ((pool = list_remove_head(&g_pool_list)) != NULL) {
zpool_close(pool->uap_zhp);
free(pool);
}
list_destroy(&g_pool_list);
while ((device = list_remove_head(&g_device_list)) != NULL)
free(device);
list_destroy(&g_device_list);
libzfs_fini(g_zfshdl);
}
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
(void) zfs_slm_deliver_event(class, subclass, nvl);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
index 150012abe71a..06acce93b8aa 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
@@ -1,64 +1,64 @@
#!/bin/sh
# shellcheck disable=SC3014,SC2154,SC2086,SC2034
#
# Turn off disk's enclosure slot if it becomes FAULTED.
#
# Bad SCSI disks can often "disappear and reappear" causing all sorts of chaos
# as they flip between FAULTED and ONLINE. If
-# ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT is set in zed.rc, and the disk gets
+# ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT is set in zed.rc, and the disk gets
# FAULTED, then power down the slot via sysfs:
#
# /sys/class/enclosure/<enclosure>/<slot>/power_status
#
# We assume the user will be responsible for turning the slot back on again.
#
# Note that this script requires that your enclosure be supported by the
# Linux SCSI Enclosure services (SES) driver. The script will do nothing
# if you have no enclosure, or if your enclosure isn't supported.
#
# Exit codes:
# 0: slot successfully powered off
# 1: enclosure not available
-# 2: ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT disabled
+# 2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT disabled
# 3: vdev was not FAULTED
# 4: The enclosure sysfs path passed from ZFS does not exist
# 5: Enclosure slot didn't actually turn off after we told it to
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
if [ ! -d /sys/class/enclosure ] ; then
# No JBOD enclosure or NVMe slots
exit 1
fi
-if [ "${ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT}" != "1" ] ; then
+if [ "${ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT}" != "1" ] ; then
exit 2
fi
if [ "$ZEVENT_VDEV_STATE_STR" != "FAULTED" ] ; then
exit 3
fi
if [ ! -f "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status" ] ; then
exit 4
fi
# Turn off the slot and wait for sysfs to report that the slot is off.
# It can take ~400ms on some enclosures and multiple retries may be needed.
for i in $(seq 1 20) ; do
echo "off" | tee "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status"
for j in $(seq 1 5) ; do
if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" == "off" ] ; then
break 2
fi
sleep 0.1
done
done
if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" != "off" ] ; then
exit 5
fi
zed_log_msg "powered down slot $ZEVENT_VDEV_ENC_SYSFS_PATH for $ZEVENT_VDEV_PATH"
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
index 49b6b54029aa..3a2519633d01 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
@@ -1,626 +1,724 @@
#!/bin/sh
# shellcheck disable=SC2154,SC3043
# zed-functions.sh
#
# ZED helper functions for use in ZEDLETs
# Variable Defaults
#
: "${ZED_LOCKDIR:="/var/lock"}"
: "${ZED_NOTIFY_INTERVAL_SECS:=3600}"
: "${ZED_NOTIFY_VERBOSE:=0}"
: "${ZED_RUNDIR:="/var/run"}"
: "${ZED_SYSLOG_PRIORITY:="daemon.notice"}"
: "${ZED_SYSLOG_TAG:="zed"}"
ZED_FLOCK_FD=8
# zed_check_cmd (cmd, ...)
#
# For each argument given, search PATH for the executable command [cmd].
# Log a message if [cmd] is not found.
#
# Arguments
# cmd: name of executable command for which to search
#
# Return
# 0 if all commands are found in PATH and are executable
# n for a count of the command executables that are not found
#
zed_check_cmd()
{
local cmd
local rv=0
for cmd; do
if ! command -v "${cmd}" >/dev/null 2>&1; then
zed_log_err "\"${cmd}\" not installed"
rv=$((rv + 1))
fi
done
return "${rv}"
}
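# Example usage: bail out of a ZEDLET early when a required tool is
# missing from PATH.
#
#     zed_check_cmd "curl" "sed" || exit 1
#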
# zed_log_msg (msg, ...)
#
# Write all argument strings to the system log.
#
# Globals
# ZED_SYSLOG_PRIORITY
# ZED_SYSLOG_TAG
#
# Return
# nothing
#
zed_log_msg()
{
logger -p "${ZED_SYSLOG_PRIORITY}" -t "${ZED_SYSLOG_TAG}" -- "$@"
}
# zed_log_err (msg, ...)
#
# Write an error message to the system log. This message will contain the
# script name, EID, and all argument strings.
#
# Globals
# ZED_SYSLOG_PRIORITY
# ZED_SYSLOG_TAG
# ZEVENT_EID
#
# Return
# nothing
#
zed_log_err()
{
zed_log_msg "error: ${0##*/}:""${ZEVENT_EID:+" eid=${ZEVENT_EID}:"}" "$@"
}
# zed_lock (lockfile, [fd])
#
# Obtain an exclusive (write) lock on [lockfile]. If the lock cannot be
# immediately acquired, wait until it becomes available.
#
# Every zed_lock() must be paired with a corresponding zed_unlock().
#
# By default, flock-style locks associate the lockfile with file descriptor 8.
# The bash manpage warns that file descriptors >9 should be used with care as
# they may conflict with file descriptors used internally by the shell. File
# descriptor 9 is reserved for zed_rate_limit(). If concurrent locks are held
# within the same process, they must use different file descriptors (preferably
# decrementing from 8); otherwise, obtaining a new lock with a given file
# descriptor will release the previous lock associated with that descriptor.
#
# Arguments
# lockfile: pathname of the lock file; the lock will be stored in
# ZED_LOCKDIR unless the pathname contains a "/".
# fd: integer for the file descriptor used by flock (OPTIONAL unless holding
# concurrent locks)
#
# Globals
# ZED_FLOCK_FD
# ZED_LOCKDIR
#
# Return
# nothing
#
zed_lock()
{
local lockfile="$1"
local fd="${2:-${ZED_FLOCK_FD}}"
local umask_bak
local err
[ -n "${lockfile}" ] || return
if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then
lockfile="${ZED_LOCKDIR}/${lockfile}"
fi
umask_bak="$(umask)"
umask 077
# Obtain a lock on the file bound to the given file descriptor.
#
eval "exec ${fd}>> '${lockfile}'"
if ! err="$(flock --exclusive "${fd}" 2>&1)"; then
zed_log_err "failed to lock \"${lockfile}\": ${err}"
fi
umask "${umask_bak}"
}
# zed_unlock (lockfile, [fd])
#
# Release the lock on [lockfile].
#
# Arguments
# lockfile: pathname of the lock file
# fd: integer for the file descriptor used by flock (must match the file
# descriptor passed to the zed_lock function call)
#
# Globals
# ZED_FLOCK_FD
# ZED_LOCKDIR
#
# Return
# nothing
#
zed_unlock()
{
local lockfile="$1"
local fd="${2:-${ZED_FLOCK_FD}}"
local err
[ -n "${lockfile}" ] || return
if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then
lockfile="${ZED_LOCKDIR}/${lockfile}"
fi
# Release the lock and close the file descriptor.
if ! err="$(flock --unlock "${fd}" 2>&1)"; then
zed_log_err "failed to unlock \"${lockfile}\": ${err}"
fi
eval "exec ${fd}>&-"
}
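# Example usage (a hypothetical ZEDLET; names are illustrative): pair
# zed_lock/zed_unlock on the same lockfile and fd to serialize access
# to a shared state file across concurrently running ZEDLETs.
#
#     zed_lock "my-zedlet.lock" 7
#     echo "last-run $(date +%s)" > "${ZED_RUNDIR}/my-zedlet.state"
#     zed_unlock "my-zedlet.lock" 7
#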
# zed_notify (subject, pathname)
#
# Send a notification via all available methods.
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Return
# 0: notification succeeded via at least one method
# 1: notification failed
# 2: no notification methods configured
#
zed_notify()
{
local subject="$1"
local pathname="$2"
local num_success=0
local num_failure=0
zed_notify_email "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
zed_notify_pushbullet "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
zed_notify_slack_webhook "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
zed_notify_pushover "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
+ zed_notify_ntfy "${subject}" "${pathname}"; rv=$?
+ [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
+ [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
+
[ "${num_success}" -gt 0 ] && return 0
[ "${num_failure}" -gt 0 ] && return 1
return 2
}
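# Example usage (hypothetical ZEDLET): write the message body to a
# temporary file, then fan it out through every configured transport.
#
#     umask 077
#     msgfile="${TMPDIR:-"/tmp"}/zed.notify.$$"
#     echo "pool 'tank' is DEGRADED" > "${msgfile}"
#     zed_notify "ZFS alert" "${msgfile}"; rv=$?
#     rm -f "${msgfile}"
#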
# zed_notify_email (subject, pathname)
#
# Send a notification via email to the address specified by ZED_EMAIL_ADDR.
#
# Requires the mail executable to be installed in the standard PATH, or
# ZED_EMAIL_PROG to be defined with the pathname of an executable capable of
# reading a message body from stdin.
#
# Command-line options to the mail executable can be specified in
# ZED_EMAIL_OPTS. This undergoes the following keyword substitutions:
# - @ADDRESS@ is replaced with the space-delimited recipient email address(es)
# - @SUBJECT@ is replaced with the notification subject
# If @SUBJECT@ is omitted here, a "Subject: ..." header will be added to the notification
#
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_EMAIL_PROG
# ZED_EMAIL_OPTS
# ZED_EMAIL_ADDR
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_email()
{
local subject="${1:-"ZED notification"}"
local pathname="${2:-"/dev/null"}"
: "${ZED_EMAIL_PROG:="mail"}"
: "${ZED_EMAIL_OPTS:="-s '@SUBJECT@' @ADDRESS@"}"
# For backward compatibility with ZED_EMAIL.
if [ -n "${ZED_EMAIL}" ] && [ -z "${ZED_EMAIL_ADDR}" ]; then
ZED_EMAIL_ADDR="${ZED_EMAIL}"
fi
[ -n "${ZED_EMAIL_ADDR}" ] || return 2
zed_check_cmd "${ZED_EMAIL_PROG}" || return 1
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err \
"${ZED_EMAIL_PROG##*/} cannot read \"${pathname}\""
return 1
fi
# construct cmdline options
ZED_EMAIL_OPTS_PARSED="$(echo "${ZED_EMAIL_OPTS}" \
| sed -e "s/@ADDRESS@/${ZED_EMAIL_ADDR}/g" \
-e "s/@SUBJECT@/${subject}/g")"
# pipe message to email prog
# shellcheck disable=SC2086,SC2248
{
# no subject passed as option?
if [ "${ZED_EMAIL_OPTS%@SUBJECT@*}" = "${ZED_EMAIL_OPTS}" ] ; then
# inject subject header
printf "Subject: %s\n" "${subject}"
fi
# output message
cat "${pathname}"
} |
eval ${ZED_EMAIL_PROG} ${ZED_EMAIL_OPTS_PARSED} >/dev/null 2>&1
rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "${ZED_EMAIL_PROG##*/} exit=${rv}"
return 1
fi
return 0
}
# zed_notify_pushbullet (subject, pathname)
#
# Send a notification via Pushbullet <https://www.pushbullet.com/>.
# The access token (ZED_PUSHBULLET_ACCESS_TOKEN) identifies this client to the
# Pushbullet server. The optional channel tag (ZED_PUSHBULLET_CHANNEL_TAG) is
# for pushing to notification feeds that can be subscribed to; if a channel is
# not defined, push notifications will instead be sent to all devices
# associated with the account specified by the access token.
#
# Requires awk, curl, and sed executables to be installed in the standard PATH.
#
# References
# https://docs.pushbullet.com/
# https://www.pushbullet.com/security
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_PUSHBULLET_ACCESS_TOKEN
# ZED_PUSHBULLET_CHANNEL_TAG
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_pushbullet()
{
local subject="$1"
local pathname="${2:-"/dev/null"}"
local msg_body
local msg_tag
local msg_json
local msg_out
local msg_err
local url="https://api.pushbullet.com/v2/pushes"
[ -n "${ZED_PUSHBULLET_ACCESS_TOKEN}" ] || return 2
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err "pushbullet cannot read \"${pathname}\""
return 1
fi
zed_check_cmd "awk" "curl" "sed" || return 1
# Escape the following characters in the message body for JSON:
# newline, backslash, double quote, horizontal tab, form feed,
# and carriage return.
#
msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\"");
gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \
"${pathname}")"
# Push to a channel if one is configured.
#
[ -n "${ZED_PUSHBULLET_CHANNEL_TAG}" ] && msg_tag="$(printf \
'"channel_tag": "%s", ' "${ZED_PUSHBULLET_CHANNEL_TAG}")"
# Construct the JSON message for pushing a note.
#
msg_json="$(printf '{%s"type": "note", "title": "%s", "body": "%s"}' \
"${msg_tag}" "${subject}" "${msg_body}")"
# Send the POST request and check for errors.
#
msg_out="$(curl -u "${ZED_PUSHBULLET_ACCESS_TOKEN}:" -X POST "${url}" \
--header "Content-Type: application/json" --data-binary "${msg_json}" \
2>/dev/null)"; rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "curl exit=${rv}"
return 1
fi
msg_err="$(echo "${msg_out}" \
| sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')"
if [ -n "${msg_err}" ]; then
zed_log_err "pushbullet \"${msg_err}"\"
return 1
fi
return 0
}
# zed_notify_slack_webhook (subject, pathname)
#
# Notification via Slack Webhook <https://api.slack.com/incoming-webhooks>.
# The Webhook URL (ZED_SLACK_WEBHOOK_URL) identifies this client to the
# Slack channel.
#
# Requires awk, curl, and sed executables to be installed in the standard PATH.
#
# References
# https://api.slack.com/incoming-webhooks
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_SLACK_WEBHOOK_URL
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_slack_webhook()
{
[ -n "${ZED_SLACK_WEBHOOK_URL}" ] || return 2
local subject="$1"
local pathname="${2:-"/dev/null"}"
local msg_body
local msg_tag
local msg_json
local msg_out
local msg_err
local url="${ZED_SLACK_WEBHOOK_URL}"
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err "slack webhook cannot read \"${pathname}\""
return 1
fi
zed_check_cmd "awk" "curl" "sed" || return 1
# Escape the following characters in the message body for JSON:
# newline, backslash, double quote, horizontal tab, form feed,
# and carriage return.
#
msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\"");
gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \
"${pathname}")"
# Construct the JSON message for posting.
#
msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
# Send the POST request and check for errors.
#
msg_out="$(curl -X POST "${url}" \
--header "Content-Type: application/json" --data-binary "${msg_json}" \
2>/dev/null)"; rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "curl exit=${rv}"
return 1
fi
msg_err="$(echo "${msg_out}" \
| sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')"
if [ -n "${msg_err}" ]; then
zed_log_err "slack webhook \"${msg_err}"\"
return 1
fi
return 0
}
# zed_notify_pushover (subject, pathname)
#
# Send a notification via Pushover <https://pushover.net/>.
# The access token (ZED_PUSHOVER_TOKEN) identifies this client to the
# Pushover server. The user token (ZED_PUSHOVER_USER) defines the user or
# group to which the notification will be sent.
#
# Requires curl and sed executables to be installed in the standard PATH.
#
# References
# https://pushover.net/api
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_PUSHOVER_TOKEN
# ZED_PUSHOVER_USER
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_pushover()
{
local subject="$1"
local pathname="${2:-"/dev/null"}"
local msg_body
local msg_out
local msg_err
local url="https://api.pushover.net/1/messages.json"
[ -n "${ZED_PUSHOVER_TOKEN}" ] && [ -n "${ZED_PUSHOVER_USER}" ] || return 2
if [ ! -r "${pathname}" ]; then
zed_log_err "pushover cannot read \"${pathname}\""
return 1
fi
zed_check_cmd "curl" "sed" || return 1
# Read the message body in.
#
msg_body="$(cat "${pathname}")"
if [ -z "${msg_body}" ]
then
msg_body=$subject
subject=""
fi
# Send the POST request and check for errors.
#
msg_out="$( \
curl \
--form-string "token=${ZED_PUSHOVER_TOKEN}" \
--form-string "user=${ZED_PUSHOVER_USER}" \
--form-string "message=${msg_body}" \
--form-string "title=${subject}" \
"${url}" \
2>/dev/null \
)"; rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "curl exit=${rv}"
return 1
fi
msg_err="$(echo "${msg_out}" \
| sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
if [ -n "${msg_err}" ]; then
zed_log_err "pushover \"${msg_err}"\"
return 1
fi
return 0
}
+# zed_notify_ntfy (subject, pathname)
+#
+# Send a notification via Ntfy.sh <https://ntfy.sh/>.
+# The ntfy topic (ZED_NTFY_TOPIC) identifies the topic to which the
+# notification will be sent on the ntfy server. The ntfy URL (ZED_NTFY_URL)
+# defines the self-hosted or hosted ntfy service location. The ntfy access
+# token <https://docs.ntfy.sh/publish/#access-tokens> (ZED_NTFY_ACCESS_TOKEN)
+# represents an access token that can be used if a topic is read/write
+# protected. If a topic can be written to publicly, ZED_NTFY_ACCESS_TOKEN is
+# not required.
+#
+# Requires curl and sed executables to be installed in the standard PATH.
+#
+# References
+# https://docs.ntfy.sh
+#
+# Arguments
+# subject: notification subject
+# pathname: pathname containing the notification message (OPTIONAL)
+#
+# Globals
+# ZED_NTFY_TOPIC
+# ZED_NTFY_ACCESS_TOKEN (OPTIONAL)
+# ZED_NTFY_URL
+#
+# Return
+# 0: notification sent
+# 1: notification failed
+# 2: not configured
+#
+zed_notify_ntfy()
+{
+ local subject="$1"
+ local pathname="${2:-"/dev/null"}"
+ local msg_body
+ local msg_out
+ local msg_err
+
+ [ -n "${ZED_NTFY_TOPIC}" ] || return 2
+ local url="${ZED_NTFY_URL:-"https://ntfy.sh"}/${ZED_NTFY_TOPIC}"
+
+ if [ ! -r "${pathname}" ]; then
+ zed_log_err "ntfy cannot read \"${pathname}\""
+ return 1
+ fi
+
+ zed_check_cmd "curl" "sed" || return 1
+
+ # Read the message body in.
+ #
+ msg_body="$(cat "${pathname}")"
+
+ if [ -z "${msg_body}" ]
+ then
+ msg_body=$subject
+ subject=""
+ fi
+
+ # Send the POST request and check for errors.
+ #
+ if [ -n "${ZED_NTFY_ACCESS_TOKEN}" ]; then
+ msg_out="$( \
+ curl \
+ -u ":${ZED_NTFY_ACCESS_TOKEN}" \
+ -H "Title: ${subject}" \
+ -d "${msg_body}" \
+ -H "Priority: high" \
+ "${url}" \
+ 2>/dev/null \
+ )"; rv=$?
+ else
+ msg_out="$( \
+ curl \
+ -H "Title: ${subject}" \
+ -d "${msg_body}" \
+ -H "Priority: high" \
+ "${url}" \
+ 2>/dev/null \
+ )"; rv=$?
+ fi
+ if [ "${rv}" -ne 0 ]; then
+ zed_log_err "curl exit=${rv}"
+ return 1
+ fi
+ msg_err="$(echo "${msg_out}" \
+ | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
+ if [ -n "${msg_err}" ]; then
+ zed_log_err "ntfy \"${msg_err}"\"
+ return 1
+ fi
+ return 0
+}
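+# Example usage (hypothetical values): a manual smoke test of the ntfy
+# transport from an interactive shell, assuming a self-hosted server.
+#
+#     ZED_NTFY_TOPIC="zed-alerts"
+#     ZED_NTFY_URL="https://ntfy.example.com"
+#     echo "test message body" > /tmp/ntfy.msg
+#     zed_notify_ntfy "ZED test" /tmp/ntfy.msg
+#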
+
+
+
# zed_rate_limit (tag, [interval])
#
# Check whether an event of a given type [tag] has already occurred within the
# last [interval] seconds.
#
# This function obtains a lock on the statefile using file descriptor 9.
#
# Arguments
# tag: arbitrary string for grouping related events to rate-limit
# interval: time interval in seconds (OPTIONAL)
#
# Globals
# ZED_NOTIFY_INTERVAL_SECS
# ZED_RUNDIR
#
# Return
# 0 if the event should be processed
# 1 if the event should be dropped
#
# State File Format
# time;tag
#
zed_rate_limit()
{
local tag="$1"
local interval="${2:-${ZED_NOTIFY_INTERVAL_SECS}}"
local lockfile="zed.zedlet.state.lock"
local lockfile_fd=9
local statefile="${ZED_RUNDIR}/zed.zedlet.state"
local time_now
local time_prev
local umask_bak
local rv=0
[ -n "${tag}" ] || return 0
zed_lock "${lockfile}" "${lockfile_fd}"
time_now="$(date +%s)"
time_prev="$(grep -E "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
| tail -1 | cut -d\; -f1)"
if [ -n "${time_prev}" ] \
&& [ "$((time_now - time_prev))" -lt "${interval}" ]; then
rv=1
else
umask_bak="$(umask)"
umask 077
grep -E -v "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
> "${statefile}.$$"
echo "${time_now};${tag}" >> "${statefile}.$$"
mv -f "${statefile}.$$" "${statefile}"
umask "${umask_bak}"
fi
zed_unlock "${lockfile}" "${lockfile_fd}"
return "${rv}"
}
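# Example usage (hypothetical tag): process the first event for a given
# vdev and drop repeats arriving within ZED_NOTIFY_INTERVAL_SECS.
#
#     zed_rate_limit "statechange-${ZEVENT_VDEV_GUID}" || exit 0
#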
# zed_guid_to_pool (guid)
#
# Convert a pool GUID into its pool name (like "tank")
# Arguments
# guid: pool GUID (decimal or hex)
#
# Return
# Pool name
#
zed_guid_to_pool()
{
if [ -z "$1" ] ; then
return
fi
guid="$(printf "%u" "$1")"
$ZPOOL get -H -ovalue,name guid | awk '$1 == '"$guid"' {print $2; exit}'
}
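# Example usage: resolve the human-readable pool name for the event's
# pool GUID (requires $ZPOOL to point at the zpool executable).
#
#     pool="$(zed_guid_to_pool "${ZEVENT_POOL_GUID}")"
#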
# zed_exit_if_ignoring_this_event
#
# Exit the script if we should ignore this event, as determined by
# $ZED_SYSLOG_SUBCLASS_INCLUDE and $ZED_SYSLOG_SUBCLASS_EXCLUDE in zed.rc.
# This function assumes you've imported the normal zed variables.
zed_exit_if_ignoring_this_event()
{
if [ -n "${ZED_SYSLOG_SUBCLASS_INCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_INCLUDE});;
*) exit 0;;
esac"
elif [ -n "${ZED_SYSLOG_SUBCLASS_EXCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_EXCLUDE}) exit 0;;
*);;
esac"
fi
}
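# Example: with ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event" set in zed.rc,
# a ZEDLET that calls this function exits silently for history_event
# zevents and keeps running for everything else.
#
#     zed_exit_if_ignoring_this_event
#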
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
index 78dc1afc7b15..bc269b155d76 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
@@ -1,149 +1,171 @@
##
# zed.rc – ZEDLET configuration.
##
# shellcheck disable=SC2034
##
# Absolute path to the debug output file.
#
#ZED_DEBUG_LOG="/tmp/zed.debug.log"
##
# Email address of the zpool administrator for receipt of notifications;
# multiple addresses can be specified if they are delimited by whitespace.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
# Enabled by default; comment to disable.
#
ZED_EMAIL_ADDR="root"
##
# Name or path of executable responsible for sending notifications via email;
# the mail program must be capable of reading a message body from stdin.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
#
#ZED_EMAIL_PROG="mail"
##
# Command-line options for ZED_EMAIL_PROG.
# The string @ADDRESS@ will be replaced with the recipient email address(es).
# The string @SUBJECT@ will be replaced with the notification subject;
# this should be protected with quotes to prevent word-splitting.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
# If @SUBJECT@ is omitted here, a "Subject: ..." header will be added to the notification
#
#ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@"
##
# Default directory for zed lock files.
#
#ZED_LOCKDIR="/var/lock"
##
# Minimum number of seconds between notifications for a similar event.
#
#ZED_NOTIFY_INTERVAL_SECS=3600
##
# Notification verbosity.
# If set to 0, suppress notification if the pool is healthy.
# If set to 1, send notification regardless of pool health.
#
#ZED_NOTIFY_VERBOSE=0
##
# Send notifications for 'ereport.fs.zfs.data' events.
# Disabled by default; any non-empty value will enable the feature.
#
#ZED_NOTIFY_DATA=
##
# Pushbullet access token.
# This grants full access to your account -- protect it accordingly!
# <https://www.pushbullet.com/get-started>
# <https://www.pushbullet.com/account>
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_ACCESS_TOKEN=""
##
# Pushbullet channel tag for push notification feeds that can be subscribed to.
# <https://www.pushbullet.com/my-channel>
# If not defined, push notifications will instead be sent to all devices
# associated with the account specified by the access token.
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_CHANNEL_TAG=""
##
# Slack Webhook URL.
# This allows posting to the given channel and includes an access token.
# <https://api.slack.com/incoming-webhooks>
# Disabled by default; uncomment to enable.
#
#ZED_SLACK_WEBHOOK_URL=""
##
# Pushover token.
# This defines the application from which the notification will be sent.
# <https://pushover.net/api#registration>
# Disabled by default; uncomment to enable.
# ZED_PUSHOVER_USER, below, must also be configured.
#
#ZED_PUSHOVER_TOKEN=""
##
# Pushover user key.
# This defines which user or group will receive Pushover notifications.
# <https://pushover.net/api#identifiers>
# Disabled by default; uncomment to enable.
# ZED_PUSHOVER_TOKEN, above, must also be configured.
#ZED_PUSHOVER_USER=""
##
# Default directory for zed state files.
#
#ZED_RUNDIR="/var/run"
##
# Turn on/off enclosure LEDs when drives become DEGRADED/FAULTED. This works
# for device-mapper and multipath devices, as well as JBOD enclosures and
# NVMe PCI drives (assuming they're supported by Linux in sysfs).
#
ZED_USE_ENCLOSURE_LEDS=1
##
# Run a scrub after every resilver.
# Disabled by default; 1 to enable and 0 to disable.
#ZED_SCRUB_AFTER_RESILVER=0
##
# The syslog priority (e.g., specified as a "facility.level" pair).
#
#ZED_SYSLOG_PRIORITY="daemon.notice"
##
# The syslog tag for marking zed events.
#
#ZED_SYSLOG_TAG="zed"
##
# Which set of event subclasses to log
# By default, events from all subclasses are logged.
# If ZED_SYSLOG_SUBCLASS_INCLUDE is set, only subclasses
# matching the pattern are logged. Use the pipe symbol (|)
# or shell wildcards (*, ?) to match multiple subclasses.
# Otherwise, if ZED_SYSLOG_SUBCLASS_EXCLUDE is set, the
# matching subclasses are excluded from logging.
#ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*|vdev.*"
ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
##
# Use GUIDs instead of names when logging pools and vdevs.
# Disabled by default; 1 to enable and 0 to disable.
#ZED_SYSLOG_DISPLAY_GUIDS=1
##
# Power off the drive's slot in the enclosure if it becomes FAULTED. This can
# help silence misbehaving drives. This assumes your drive enclosure fully
# supports slot power control via sysfs.
-#ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT=1
+#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT=1
+
+##
+# Ntfy topic
+# This defines which topic will receive the ntfy notification.
+# <https://docs.ntfy.sh/publish/>
+# Disabled by default; uncomment to enable.
+#ZED_NTFY_TOPIC=""
+
+##
+# Ntfy access token (optional for public topics).
+# This defines an access token used to authenticate
+# when sending to protected topics.
+# <https://docs.ntfy.sh/publish/#access-tokens>
+# Disabled by default; uncomment to enable.
+#ZED_NTFY_ACCESS_TOKEN=""
+
+##
+# Ntfy service URL
+# This defines which service the ntfy call will be directed to.
+# <https://docs.ntfy.sh/install/>
+# https://ntfy.sh by default; uncomment to use an alternative service URL.
+#ZED_NTFY_URL="https://ntfy.sh"
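Conceptually, these three settings combine into the ntfy zedlet's HTTP publish call; a rough sketch following the ntfy documentation (the exact invocation lives in zed-functions.sh):
# Illustrative only: publish an event message to the configured topic.
curl -H "Authorization: Bearer ${ZED_NTFY_ACCESS_TOKEN}" \
    -d "event message" "${ZED_NTFY_URL}/${ZED_NTFY_TOPIC}"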
diff --git a/sys/contrib/openzfs/cmd/zed/zed_event.c b/sys/contrib/openzfs/cmd/zed/zed_event.c
index c60d5a4bc22e..7e5867692234 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_event.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_event.c
@@ -1,1002 +1,1033 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libzfs_core.h>
#include <paths.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/zfs_ioctl.h>
#include <time.h>
#include <unistd.h>
#include <sys/fm/fs/zfs.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_disk_event.h"
#include "zed_event.h"
#include "zed_exec.h"
#include "zed_file.h"
#include "zed_log.h"
#include "zed_strings.h"
#include "agents/zfs_agents.h"
+#include <libzutil.h>
#define MAXBUF 4096
static int max_zevent_buf_len = 1 << 20;
/*
* Open the libzfs interface.
*/
int
zed_event_init(struct zed_conf *zcp)
{
if (!zcp)
zed_log_die("Failed zed_event_init: %s", strerror(EINVAL));
zcp->zfs_hdl = libzfs_init();
if (!zcp->zfs_hdl) {
if (zcp->do_idle)
return (-1);
zed_log_die("Failed to initialize libzfs");
}
zcp->zevent_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (zcp->zevent_fd < 0) {
if (zcp->do_idle)
return (-1);
zed_log_die("Failed to open \"%s\": %s",
ZFS_DEV, strerror(errno));
}
zfs_agent_init(zcp->zfs_hdl);
if (zed_disk_event_init() != 0) {
if (zcp->do_idle)
return (-1);
zed_log_die("Failed to initialize disk events");
}
if (zcp->max_zevent_buf_len != 0)
max_zevent_buf_len = zcp->max_zevent_buf_len;
return (0);
}
/*
* Close the libzfs interface.
*/
void
zed_event_fini(struct zed_conf *zcp)
{
if (!zcp)
zed_log_die("Failed zed_event_fini: %s", strerror(EINVAL));
zed_disk_event_fini();
zfs_agent_fini();
if (zcp->zevent_fd >= 0) {
if (close(zcp->zevent_fd) < 0)
zed_log_msg(LOG_WARNING, "Failed to close \"%s\": %s",
ZFS_DEV, strerror(errno));
zcp->zevent_fd = -1;
}
if (zcp->zfs_hdl) {
libzfs_fini(zcp->zfs_hdl);
zcp->zfs_hdl = NULL;
}
zed_exec_fini();
}
static void
_bump_event_queue_length(void)
{
int zzlm = -1, wr;
char qlen_buf[12] = {0}; /* parameter is int => max "-2147483647\n" */
long int qlen, orig_qlen;
zzlm = open("/sys/module/zfs/parameters/zfs_zevent_len_max", O_RDWR);
if (zzlm < 0)
goto done;
if (read(zzlm, qlen_buf, sizeof (qlen_buf)) < 0)
goto done;
qlen_buf[sizeof (qlen_buf) - 1] = '\0';
errno = 0;
orig_qlen = qlen = strtol(qlen_buf, NULL, 10);
if (errno == ERANGE)
goto done;
if (qlen <= 0)
qlen = 512; /* default zfs_zevent_len_max value */
else
qlen *= 2;
/*
* Don't consume all of kernel memory with event logs if something
* goes wrong.
*/
if (qlen > max_zevent_buf_len)
qlen = max_zevent_buf_len;
if (qlen == orig_qlen)
goto done;
wr = snprintf(qlen_buf, sizeof (qlen_buf), "%ld", qlen);
if (wr >= sizeof (qlen_buf)) {
wr = sizeof (qlen_buf) - 1;
zed_log_msg(LOG_WARNING, "Truncation in %s()", __func__);
}
if (pwrite(zzlm, qlen_buf, wr + 1, 0) < 0)
goto done;
zed_log_msg(LOG_WARNING, "Bumping queue length to %ld", qlen);
done:
if (zzlm > -1)
(void) close(zzlm);
}
/*
* Seek to the event specified by [saved_eid] and [saved_etime].
* This protects against processing a given event more than once.
* Return 0 upon a successful seek to the specified event, or -1 otherwise.
*
* A zevent is considered to be uniquely specified by its (eid,time) tuple.
* The unsigned 64b eid is set to 1 when the kernel module is loaded, and
* incremented by 1 for each new event. Since the state file can persist
* across a kernel module reload, the time must be checked to ensure a match.
*/
int
zed_event_seek(struct zed_conf *zcp, uint64_t saved_eid, int64_t saved_etime[])
{
uint64_t eid;
int found;
nvlist_t *nvl;
int n_dropped;
int64_t *etime;
uint_t nelem;
int rv;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to seek zevent: %s",
strerror(errno));
return (-1);
}
eid = 0;
found = 0;
while ((eid < saved_eid) && !found) {
rv = zpool_events_next(zcp->zfs_hdl, &nvl, &n_dropped,
ZEVENT_NONBLOCK, zcp->zevent_fd);
if ((rv != 0) || !nvl)
break;
if (n_dropped > 0) {
zed_log_msg(LOG_WARNING, "Missed %d events", n_dropped);
_bump_event_queue_length();
}
if (nvlist_lookup_uint64(nvl, "eid", &eid) != 0) {
zed_log_msg(LOG_WARNING, "Failed to lookup zevent eid");
} else if (nvlist_lookup_int64_array(nvl, "time",
&etime, &nelem) != 0) {
zed_log_msg(LOG_WARNING,
"Failed to lookup zevent time (eid=%llu)", eid);
} else if (nelem != 2) {
zed_log_msg(LOG_WARNING,
"Failed to lookup zevent time (eid=%llu, nelem=%u)",
eid, nelem);
} else if ((eid != saved_eid) ||
(etime[0] != saved_etime[0]) ||
(etime[1] != saved_etime[1])) {
/* no-op */
} else {
found = 1;
}
free(nvl);
}
if (!found && (saved_eid > 0)) {
if (zpool_events_seek(zcp->zfs_hdl, ZEVENT_SEEK_START,
zcp->zevent_fd) < 0)
zed_log_msg(LOG_WARNING, "Failed to seek to eid=0");
else
eid = 0;
}
zed_log_msg(LOG_NOTICE, "Processing events since eid=%llu", eid);
return (found ? 0 : -1);
}
/*
* Return non-zero if nvpair [name] should be formatted in hex; o/w, return 0.
*/
static int
_zed_event_value_is_hex(const char *name)
{
const char *hex_suffix[] = {
"_guid",
"_guids",
NULL
};
const char **pp;
char *p;
if (!name)
return (0);
for (pp = hex_suffix; *pp; pp++) {
p = strstr(name, *pp);
if (p && strlen(p) == strlen(*pp))
return (1);
}
return (0);
}
/*
* Add an environment variable for [eid] to the container [zsp].
*
* The variable name is the concatenation of [prefix] and [name] converted to
* uppercase with non-alphanumeric characters converted to underscores;
* [prefix] is optional, and [name] must begin with an alphabetic character.
* If the converted variable name already exists within the container [zsp],
* its existing value will be replaced with the new value.
*
* The variable value is specified by the format string [fmt].
*
* Returns 0 on success, and -1 on error (with errno set).
*
* All environment variables in [zsp] should be added through this function.
*/
static __attribute__((format(printf, 5, 6))) int
_zed_event_add_var(uint64_t eid, zed_strings_t *zsp,
const char *prefix, const char *name, const char *fmt, ...)
{
char keybuf[MAXBUF];
char valbuf[MAXBUF];
char *dstp;
const char *srcp;
const char *lastp;
int n;
int buflen;
va_list vargs;
assert(zsp != NULL);
assert(fmt != NULL);
if (!name) {
errno = EINVAL;
zed_log_msg(LOG_WARNING,
"Failed to add variable for eid=%llu: Name is empty", eid);
return (-1);
} else if (!isalpha(name[0])) {
errno = EINVAL;
zed_log_msg(LOG_WARNING,
"Failed to add variable for eid=%llu: "
"Name \"%s\" is invalid", eid, name);
return (-1);
}
/*
* Construct the string key by converting PREFIX (if present) and NAME.
*/
dstp = keybuf;
lastp = keybuf + sizeof (keybuf);
if (prefix) {
for (srcp = prefix; *srcp && (dstp < lastp); srcp++)
*dstp++ = isalnum(*srcp) ? toupper(*srcp) : '_';
}
for (srcp = name; *srcp && (dstp < lastp); srcp++)
*dstp++ = isalnum(*srcp) ? toupper(*srcp) : '_';
if (dstp == lastp) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_WARNING,
"Failed to add variable for eid=%llu: Name too long", eid);
return (-1);
}
*dstp = '\0';
/*
* Construct the string specified by "[PREFIX][NAME]=[FMT]".
*/
dstp = valbuf;
buflen = sizeof (valbuf);
n = strlcpy(dstp, keybuf, buflen);
if (n >= sizeof (valbuf)) {
errno = EMSGSIZE;
zed_log_msg(LOG_WARNING, "Failed to add %s for eid=%llu: %s",
keybuf, eid, "Exceeded buffer size");
return (-1);
}
dstp += n;
buflen -= n;
*dstp++ = '=';
buflen--;
if (buflen <= 0) {
errno = EMSGSIZE;
zed_log_msg(LOG_WARNING, "Failed to add %s for eid=%llu: %s",
keybuf, eid, "Exceeded buffer size");
return (-1);
}
va_start(vargs, fmt);
n = vsnprintf(dstp, buflen, fmt, vargs);
va_end(vargs);
if ((n < 0) || (n >= buflen)) {
errno = EMSGSIZE;
zed_log_msg(LOG_WARNING, "Failed to add %s for eid=%llu: %s",
keybuf, eid, "Exceeded buffer size");
return (-1);
} else if (zed_strings_add(zsp, keybuf, valbuf) < 0) {
zed_log_msg(LOG_WARNING, "Failed to add %s for eid=%llu: %s",
keybuf, eid, strerror(errno));
return (-1);
}
return (0);
}
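From the zedlet side, the effect of this conversion is that every nvpair surfaces as an uppercase, underscore-separated environment variable; a sketch (variable values illustrative):
# nvpair "pool" with prefix "ZEVENT_" surfaces as ZEVENT_POOL;
# "vdev_guid" surfaces as ZEVENT_VDEV_GUID (hex-formatted, per
# _zed_event_value_is_hex()).
echo "pool=${ZEVENT_POOL} vdev=${ZEVENT_VDEV_GUID}"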
static int
_zed_event_add_array_err(uint64_t eid, const char *name)
{
errno = EMSGSIZE;
zed_log_msg(LOG_WARNING,
"Failed to convert nvpair \"%s\" for eid=%llu: "
"Exceeded buffer size", name, eid);
return (-1);
}
static int
_zed_event_add_int8_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
int8_t *i8p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_INT8_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_int8_array(nvp, &i8p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%d ", i8p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_uint8_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
uint8_t *u8p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_UINT8_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_uint8_array(nvp, &u8p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%u ", u8p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_int16_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
int16_t *i16p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_INT16_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_int16_array(nvp, &i16p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%d ", i16p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_uint16_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
uint16_t *u16p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_UINT16_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_uint16_array(nvp, &u16p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%u ", u16p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_int32_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
int32_t *i32p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_INT32_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_int32_array(nvp, &i32p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%d ", i32p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_uint32_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
uint32_t *u32p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_UINT32_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_uint32_array(nvp, &u32p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%u ", u32p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_int64_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
int64_t *i64p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_INT64_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_int64_array(nvp, &i64p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%lld ", (u_longlong_t)i64p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_uint64_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
const char *fmt;
uint64_t *u64p;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_UINT64_ARRAY));
name = nvpair_name(nvp);
fmt = _zed_event_value_is_hex(name) ? "0x%.16llX " : "%llu ";
(void) nvpair_value_uint64_array(nvp, &u64p, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, fmt, (u_longlong_t)u64p[i]);
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
static int
_zed_event_add_string_array(uint64_t eid, zed_strings_t *zsp,
const char *prefix, nvpair_t *nvp)
{
char buf[MAXBUF];
int buflen = sizeof (buf);
const char *name;
const char **strp;
uint_t nelem;
uint_t i;
char *p;
int n;
assert((nvp != NULL) && (nvpair_type(nvp) == DATA_TYPE_STRING_ARRAY));
name = nvpair_name(nvp);
(void) nvpair_value_string_array(nvp, &strp, &nelem);
for (i = 0, p = buf; (i < nelem) && (buflen > 0); i++) {
n = snprintf(p, buflen, "%s ", strp[i] ? strp[i] : "<NULL>");
if ((n < 0) || (n >= buflen))
return (_zed_event_add_array_err(eid, name));
p += n;
buflen -= n;
}
if (nelem > 0)
*--p = '\0';
return (_zed_event_add_var(eid, zsp, prefix, name, "%s", buf));
}
/*
* Convert the nvpair [nvp] to a string which is added to the environment
* of the child process.
* Conversion errors are logged; the function itself returns nothing.
*/
static void
_zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
{
const char *name;
data_type_t type;
const char *prefix = ZEVENT_VAR_PREFIX;
boolean_t b;
double d;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
const char *str;
assert(zsp != NULL);
assert(nvp != NULL);
name = nvpair_name(nvp);
type = nvpair_type(nvp);
switch (type) {
case DATA_TYPE_BOOLEAN:
_zed_event_add_var(eid, zsp, prefix, name, "%s", "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
_zed_event_add_var(eid, zsp, prefix, name, "%s", b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (int8_t *)&i8);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
_zed_event_add_var(eid, zsp, prefix, name, "%u", i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (int16_t *)&i16);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
_zed_event_add_var(eid, zsp, prefix, name, "%u", i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (int32_t *)&i32);
_zed_event_add_var(eid, zsp, prefix, name, "%d", i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
_zed_event_add_var(eid, zsp, prefix, name, "%u", i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (int64_t *)&i64);
_zed_event_add_var(eid, zsp, prefix, name,
"%lld", (longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
_zed_event_add_var(eid, zsp, prefix, name,
(_zed_event_value_is_hex(name) ? "0x%.16llX" : "%llu"),
(u_longlong_t)i64);
/*
* shadow readable strings for vdev state pairs
*/
if (strcmp(name, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name, FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
char alt[32];
(void) snprintf(alt, sizeof (alt), "%s_str", name);
_zed_event_add_var(eid, zsp, prefix, alt, "%s",
zpool_state_to_name(i64, VDEV_AUX_NONE));
} else
/*
* shadow readable strings for pool state
*/
if (strcmp(name, FM_EREPORT_PAYLOAD_ZFS_POOL_STATE) == 0) {
char alt[32];
(void) snprintf(alt, sizeof (alt), "%s_str", name);
_zed_event_add_var(eid, zsp, prefix, alt, "%s",
zpool_pool_state_to_name(i64));
}
break;
case DATA_TYPE_DOUBLE:
(void) nvpair_value_double(nvp, &d);
_zed_event_add_var(eid, zsp, prefix, name, "%g", d);
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (hrtime_t *)&i64);
_zed_event_add_var(eid, zsp, prefix, name,
"%llu", (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
_zed_event_add_var(eid, zsp, prefix, name,
"%s", (str ? str : "<NULL>"));
break;
case DATA_TYPE_INT8_ARRAY:
_zed_event_add_int8_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_UINT8_ARRAY:
_zed_event_add_uint8_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_INT16_ARRAY:
_zed_event_add_int16_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_UINT16_ARRAY:
_zed_event_add_uint16_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_INT32_ARRAY:
_zed_event_add_int32_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_UINT32_ARRAY:
_zed_event_add_uint32_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_INT64_ARRAY:
_zed_event_add_int64_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_UINT64_ARRAY:
_zed_event_add_uint64_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_STRING_ARRAY:
_zed_event_add_string_array(eid, zsp, prefix, nvp);
break;
case DATA_TYPE_NVLIST:
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_NVLIST_ARRAY:
_zed_event_add_var(eid, zsp, prefix, name, "_NOT_IMPLEMENTED_");
break;
default:
errno = EINVAL;
zed_log_msg(LOG_WARNING,
"Failed to convert nvpair \"%s\" for eid=%llu: "
"Unrecognized type=%u", name, eid, (unsigned int) type);
break;
}
}
/*
* Restrict various environment variables to safe and sane values
* when constructing the environment for the child process, unless
* we're running with a custom $PATH (like under the ZFS test suite).
*
* Reference: Secure Programming Cookbook by Viega & Messier, Section 1.1.
*/
static void
_zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp,
const char *path)
{
const char *env_restrict[][2] = {
{ "IFS", " \t\n" },
{ "PATH", _PATH_STDPATH },
{ "ZDB", SBINDIR "/zdb" },
{ "ZED", SBINDIR "/zed" },
{ "ZFS", SBINDIR "/zfs" },
{ "ZINJECT", SBINDIR "/zinject" },
{ "ZPOOL", SBINDIR "/zpool" },
{ "ZFS_ALIAS", ZFS_META_ALIAS },
{ "ZFS_VERSION", ZFS_META_VERSION },
{ "ZFS_RELEASE", ZFS_META_RELEASE },
{ NULL, NULL }
};
/*
* If we have a custom $PATH, use the default ZFS binary locations
* instead of the hard-coded ones.
*/
const char *env_path[][2] = {
{ "IFS", " \t\n" },
{ "PATH", NULL }, /* $PATH copied in later on */
{ "ZDB", "zdb" },
{ "ZED", "zed" },
{ "ZFS", "zfs" },
{ "ZINJECT", "zinject" },
{ "ZPOOL", "zpool" },
{ "ZFS_ALIAS", ZFS_META_ALIAS },
{ "ZFS_VERSION", ZFS_META_VERSION },
{ "ZFS_RELEASE", ZFS_META_RELEASE },
{ NULL, NULL }
};
const char *(*pa)[2];
assert(zsp != NULL);
pa = path != NULL ? env_path : env_restrict;
for (; *(*pa); pa++) {
/* Use our custom $PATH if we have one */
if (path != NULL && strcmp((*pa)[0], "PATH") == 0)
(*pa)[1] = path;
_zed_event_add_var(eid, zsp, NULL, (*pa)[0], "%s", (*pa)[1]);
}
}
/*
* Preserve specified variables from the parent environment
* when constructing the environment for the child process.
*
* Reference: Secure Programming Cookbook by Viega & Messier, Section 1.1.
*/
static void
_zed_event_add_env_preserve(uint64_t eid, zed_strings_t *zsp)
{
const char *env_preserve[] = {
"TZ",
NULL
};
const char **keyp;
const char *val;
assert(zsp != NULL);
for (keyp = env_preserve; *keyp; keyp++) {
if ((val = getenv(*keyp)))
_zed_event_add_var(eid, zsp, NULL, *keyp, "%s", val);
}
}
/*
* Compute the "subclass" by removing the first 3 components of [class]
* (which will always be of the form "*.fs.zfs"). Return a pointer inside
* the string [class], or NULL if insufficient components exist.
*/
static const char *
_zed_event_get_subclass(const char *class)
{
const char *p;
int i;
if (!class)
return (NULL);
p = class;
for (i = 0; i < 3; i++) {
p = strchr(p, '.');
if (!p)
break;
p++;
}
return (p);
}
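For example, the class "ereport.fs.zfs.checksum" yields the subclass "checksum". The same stripping can be mimicked in shell with a shortest-prefix pattern (illustrative):
class="ereport.fs.zfs.checksum"
subclass="${class#*.*.*.}"    # -> "checksum"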
/*
* Convert the zevent time from a 2-element array of 64b integers
* into a more convenient form:
* - TIME_SECS is the second component of the time.
* - TIME_NSECS is the nanosecond component of the time.
* - TIME_STRING is an almost-RFC3339-compliant string representation.
*/
static void
_zed_event_add_time_strings(uint64_t eid, zed_strings_t *zsp, int64_t etime[])
{
struct tm stp;
char buf[32];
assert(zsp != NULL);
assert(etime != NULL);
_zed_event_add_var(eid, zsp, ZEVENT_VAR_PREFIX, "TIME_SECS",
"%" PRId64, etime[0]);
_zed_event_add_var(eid, zsp, ZEVENT_VAR_PREFIX, "TIME_NSECS",
"%" PRId64, etime[1]);
if (!localtime_r((const time_t *) &etime[0], &stp)) {
zed_log_msg(LOG_WARNING, "Failed to add %s%s for eid=%llu: %s",
ZEVENT_VAR_PREFIX, "TIME_STRING", eid, "localtime error");
} else if (!strftime(buf, sizeof (buf), "%Y-%m-%d %H:%M:%S%z", &stp)) {
zed_log_msg(LOG_WARNING, "Failed to add %s%s for eid=%llu: %s",
ZEVENT_VAR_PREFIX, "TIME_STRING", eid, "strftime error");
} else {
_zed_event_add_var(eid, zsp, ZEVENT_VAR_PREFIX, "TIME_STRING",
"%s", buf);
}
}
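A zedlet therefore sees three time variables per event; sketched values for an event at Unix time 1700000000 (shown for UTC; TIME_STRING follows the local zone):
# ZEVENT_TIME_SECS=1700000000
# ZEVENT_TIME_NSECS=123456789
# ZEVENT_TIME_STRING="2023-11-14 22:13:20+0000"
echo "event at ${ZEVENT_TIME_STRING} (${ZEVENT_TIME_SECS}.${ZEVENT_TIME_NSECS})"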
+
+static void
+_zed_event_update_enc_sysfs_path(nvlist_t *nvl)
+{
+ const char *vdev_path;
+
+ if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
+ &vdev_path) != 0) {
+ return; /* some other kind of event, ignore it */
+ }
+
+ if (vdev_path == NULL) {
+ return;
+ }
+
+ update_vdev_config_dev_sysfs_path(nvl, vdev_path,
+ FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH);
+}
+
/*
* Service the next zevent, blocking until one is available.
*/
int
zed_event_service(struct zed_conf *zcp)
{
nvlist_t *nvl;
nvpair_t *nvp;
int n_dropped;
zed_strings_t *zsp;
uint64_t eid;
int64_t *etime;
uint_t nelem;
const char *class;
const char *subclass;
int rv;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to service zevent: %s",
strerror(errno));
return (EINVAL);
}
rv = zpool_events_next(zcp->zfs_hdl, &nvl, &n_dropped, ZEVENT_NONE,
zcp->zevent_fd);
if ((rv != 0) || !nvl)
return (errno);
if (n_dropped > 0) {
zed_log_msg(LOG_WARNING, "Missed %d events", n_dropped);
_bump_event_queue_length();
}
if (nvlist_lookup_uint64(nvl, "eid", &eid) != 0) {
zed_log_msg(LOG_WARNING, "Failed to lookup zevent eid");
} else if (nvlist_lookup_int64_array(
nvl, "time", &etime, &nelem) != 0) {
zed_log_msg(LOG_WARNING,
"Failed to lookup zevent time (eid=%llu)", eid);
} else if (nelem != 2) {
zed_log_msg(LOG_WARNING,
"Failed to lookup zevent time (eid=%llu, nelem=%u)",
eid, nelem);
} else if (nvlist_lookup_string(nvl, "class", &class) != 0) {
zed_log_msg(LOG_WARNING,
"Failed to lookup zevent class (eid=%llu)", eid);
} else {
+ /*
+ * Special case: If we can dynamically detect an enclosure sysfs
+ * path, then use that value rather than the one stored in the
+ * vd->vdev_enc_sysfs_path. There have been rare cases where
+ * vd->vdev_enc_sysfs_path becomes outdated. However, there
+ * will be other times when we can not dynamically detect the
+ * sysfs path (like if a disk disappears) and have to rely on
+ * the old value for things like turning on the fault LED.
+ */
+ _zed_event_update_enc_sysfs_path(nvl);
+
/* let internal modules see this event first */
zfs_agent_post_event(class, NULL, nvl);
zsp = zed_strings_create();
nvp = NULL;
while ((nvp = nvlist_next_nvpair(nvl, nvp)))
_zed_event_add_nvpair(eid, zsp, nvp);
_zed_event_add_env_restrict(eid, zsp, zcp->path);
_zed_event_add_env_preserve(eid, zsp);
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "PID",
"%d", (int)getpid());
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "ZEDLET_DIR",
"%s", zcp->zedlet_dir);
subclass = _zed_event_get_subclass(class);
_zed_event_add_var(eid, zsp, ZEVENT_VAR_PREFIX, "SUBCLASS",
"%s", (subclass ? subclass : class));
_zed_event_add_time_strings(eid, zsp, etime);
zed_exec_process(eid, class, subclass, zcp, zsp);
zed_conf_write_state(zcp, eid, etime);
zed_strings_destroy(zsp);
}
nvlist_free(nvl);
return (0);
}
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index 5a2285ce434b..f7e07b8199d3 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -1,8986 +1,8997 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2012 Milan Jurik. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
*/
#include <assert.h>
#include <ctype.h>
#include <sys/debug.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <libnvpair.h>
#include <locale.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <zone.h>
#include <grp.h>
#include <pwd.h>
#include <umem.h>
#include <pthread.h>
#include <signal.h>
#include <sys/list.h>
#include <sys/mkdev.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fs/zfs.h>
#include <sys/systeminfo.h>
#include <sys/types.h>
#include <time.h>
#include <sys/zfs_project.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <zfs_prop.h>
#include <zfs_deleg.h>
#include <libzutil.h>
#ifdef HAVE_IDMAP
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include "zfs_iter.h"
#include "zfs_util.h"
#include "zfs_comutil.h"
#include "zfs_projectutil.h"
libzfs_handle_t *g_zfs;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static int zfs_do_clone(int argc, char **argv);
static int zfs_do_create(int argc, char **argv);
static int zfs_do_destroy(int argc, char **argv);
static int zfs_do_get(int argc, char **argv);
static int zfs_do_inherit(int argc, char **argv);
static int zfs_do_list(int argc, char **argv);
static int zfs_do_mount(int argc, char **argv);
static int zfs_do_rename(int argc, char **argv);
static int zfs_do_rollback(int argc, char **argv);
static int zfs_do_set(int argc, char **argv);
static int zfs_do_upgrade(int argc, char **argv);
static int zfs_do_snapshot(int argc, char **argv);
static int zfs_do_unmount(int argc, char **argv);
static int zfs_do_share(int argc, char **argv);
static int zfs_do_unshare(int argc, char **argv);
static int zfs_do_send(int argc, char **argv);
static int zfs_do_receive(int argc, char **argv);
static int zfs_do_promote(int argc, char **argv);
static int zfs_do_userspace(int argc, char **argv);
static int zfs_do_allow(int argc, char **argv);
static int zfs_do_unallow(int argc, char **argv);
static int zfs_do_hold(int argc, char **argv);
static int zfs_do_holds(int argc, char **argv);
static int zfs_do_release(int argc, char **argv);
static int zfs_do_diff(int argc, char **argv);
static int zfs_do_bookmark(int argc, char **argv);
static int zfs_do_channel_program(int argc, char **argv);
static int zfs_do_load_key(int argc, char **argv);
static int zfs_do_unload_key(int argc, char **argv);
static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
static int zfs_do_version(int argc, char **argv);
static int zfs_do_redact(int argc, char **argv);
static int zfs_do_wait(int argc, char **argv);
#ifdef __FreeBSD__
static int zfs_do_jail(int argc, char **argv);
static int zfs_do_unjail(int argc, char **argv);
#endif
#ifdef __linux__
static int zfs_do_zone(int argc, char **argv);
static int zfs_do_unzone(int argc, char **argv);
#endif
static int zfs_do_help(int argc, char **argv);
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_CLONE,
HELP_CREATE,
HELP_DESTROY,
HELP_GET,
HELP_INHERIT,
HELP_UPGRADE,
HELP_LIST,
HELP_MOUNT,
HELP_PROMOTE,
HELP_RECEIVE,
HELP_RENAME,
HELP_ROLLBACK,
HELP_SEND,
HELP_SET,
HELP_SHARE,
HELP_SNAPSHOT,
HELP_UNMOUNT,
HELP_UNSHARE,
HELP_ALLOW,
HELP_UNALLOW,
HELP_USERSPACE,
HELP_GROUPSPACE,
HELP_PROJECTSPACE,
HELP_PROJECT,
HELP_HOLD,
HELP_HOLDS,
HELP_RELEASE,
HELP_DIFF,
HELP_BOOKMARK,
HELP_CHANNEL_PROGRAM,
HELP_LOAD_KEY,
HELP_UNLOAD_KEY,
HELP_CHANGE_KEY,
HELP_VERSION,
HELP_REDACT,
HELP_JAIL,
HELP_UNJAIL,
HELP_WAIT,
HELP_ZONE,
HELP_UNZONE,
} zfs_help_t;
typedef struct zfs_command {
const char *name;
int (*func)(int argc, char **argv);
zfs_help_t usage;
} zfs_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
usage message. The usage messages need to be internationalized, so we need
a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zfs_command_t command_table[] = {
{ "version", zfs_do_version, HELP_VERSION },
{ NULL },
{ "create", zfs_do_create, HELP_CREATE },
{ "destroy", zfs_do_destroy, HELP_DESTROY },
{ NULL },
{ "snapshot", zfs_do_snapshot, HELP_SNAPSHOT },
{ "rollback", zfs_do_rollback, HELP_ROLLBACK },
{ "clone", zfs_do_clone, HELP_CLONE },
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },
{ "set", zfs_do_set, HELP_SET },
{ "get", zfs_do_get, HELP_GET },
{ "inherit", zfs_do_inherit, HELP_INHERIT },
{ "upgrade", zfs_do_upgrade, HELP_UPGRADE },
{ NULL },
{ "userspace", zfs_do_userspace, HELP_USERSPACE },
{ "groupspace", zfs_do_userspace, HELP_GROUPSPACE },
{ "projectspace", zfs_do_userspace, HELP_PROJECTSPACE },
{ NULL },
{ "project", zfs_do_project, HELP_PROJECT },
{ NULL },
{ "mount", zfs_do_mount, HELP_MOUNT },
{ "unmount", zfs_do_unmount, HELP_UNMOUNT },
{ "share", zfs_do_share, HELP_SHARE },
{ "unshare", zfs_do_unshare, HELP_UNSHARE },
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
{ "redact", zfs_do_redact, HELP_REDACT },
{ "wait", zfs_do_wait, HELP_WAIT },
#ifdef __FreeBSD__
{ "jail", zfs_do_jail, HELP_JAIL },
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
#endif
#ifdef __linux__
{ "zone", zfs_do_zone, HELP_ZONE },
{ "unzone", zfs_do_unzone, HELP_UNZONE },
#endif
};
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
zfs_command_t *current_command;
static const char *
get_usage(zfs_help_t idx)
{
switch (idx) {
case HELP_CLONE:
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
return (gettext("\tcreate [-Pnpuv] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-Pnpsv] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
"\tdestroy [-dnpRrv] "
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
"[filesystem|volume|snapshot|bookmark] ...\n"));
case HELP_INHERIT:
return (gettext("\tinherit [-rS] <property> "
"<filesystem|volume|snapshot> ...\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
"\tmount [-flvO] [-o opts] <-a | filesystem>\n"));
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive [-vMnsFhu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
"\treceive [-vMnsFhu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
"\trename -p [-f] <filesystem|volume> <filesystem|volume>\n"
"\trename -u [-f] <filesystem> <filesystem>\n"
"\trename -r <snapshot> <snapshot>\n"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
return (gettext("\tsend [-DLPbcehnpsVvw] "
"[-i|-I snapshot]\n"
"\t [-R [-X dataset[,dataset]...]] <snapshot>\n"
"\tsend [-DnVvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
"\tsend [-DnPpVvLec] [-i bookmark|snapshot] "
"--redact <bookmark> <snapshot>\n"
"\tsend [-nVvPe] -t <receive_resume_token>\n"
"\tsend [-PnVv] --saved filesystem\n"));
case HELP_SET:
return (gettext("\tset [-u] <property=value> ... "
"<filesystem|volume|snapshot> ...\n"));
case HELP_SHARE:
return (gettext("\tshare [-l] <-a [nfs|smb] | filesystem>\n"));
case HELP_SNAPSHOT:
return (gettext("\tsnapshot [-r] [-o property=value] ... "
"<filesystem|volume>@<snap> ...\n"));
case HELP_UNMOUNT:
return (gettext("\tunmount [-fu] "
"<-a | filesystem|mountpoint>\n"));
case HELP_UNSHARE:
return (gettext("\tunshare "
"<-a [nfs|smb] | filesystem|mountpoint>\n"));
case HELP_ALLOW:
return (gettext("\tallow <filesystem|volume>\n"
"\tallow [-ldug] "
"<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
"\t <filesystem|volume>\n"
"\tallow [-ld] -e <perm|@setname>[,...] "
"<filesystem|volume>\n"
"\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
"\tallow -s @setname <perm|@setname>[,...] "
"<filesystem|volume>\n"));
case HELP_UNALLOW:
return (gettext("\tunallow [-rldug] "
"<\"everyone\"|user|group>[,...]\n"
"\t [<perm|@setname>[,...]] <filesystem|volume>\n"
"\tunallow [-rld] -e [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -c [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
"<filesystem|volume>\n"));
case HELP_USERSPACE:
return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_GROUPSPACE:
return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_PROJECTSPACE:
return (gettext("\tprojectspace [-Hp] [-o field[,...]] "
"[-s field] ... \n"
"\t [-S field] ... <filesystem|snapshot|path>\n"));
case HELP_PROJECT:
return (gettext("\tproject [-d|-r] <directory|file ...>\n"
"\tproject -c [-0] [-d|-r] [-p id] <directory|file ...>\n"
"\tproject -C [-k] [-r] <directory ...>\n"
"\tproject [-p id] [-r] [-s] <directory ...>\n"));
case HELP_HOLD:
return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
case HELP_HOLDS:
return (gettext("\tholds [-rHp] <snapshot> ...\n"));
case HELP_RELEASE:
return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
case HELP_DIFF:
return (gettext("\tdiff [-FHth] <snapshot> "
"[snapshot|filesystem]\n"));
case HELP_BOOKMARK:
return (gettext("\tbookmark <snapshot|bookmark> "
"<newbookmark>\n"));
case HELP_CHANNEL_PROGRAM:
return (gettext("\tprogram [-jn] [-t <instruction limit>] "
"[-m <memory limit (b)>]\n"
"\t <pool> <program file> [lua args...]\n"));
case HELP_LOAD_KEY:
return (gettext("\tload-key [-rn] [-L <keylocation>] "
"<-a | filesystem|volume>\n"));
case HELP_UNLOAD_KEY:
return (gettext("\tunload-key [-r] "
"<-a | filesystem|volume>\n"));
case HELP_CHANGE_KEY:
return (gettext("\tchange-key [-l] [-o keyformat=<value>]\n"
"\t [-o keylocation=<value>] [-o pbkdf2iters=<value>]\n"
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
case HELP_JAIL:
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
case HELP_UNJAIL:
return (gettext("\tunjail <jailid|jailname> <filesystem>\n"));
case HELP_WAIT:
return (gettext("\twait [-t <activity>] <filesystem>\n"));
case HELP_ZONE:
return (gettext("\tzone <nsfile> <filesystem>\n"));
case HELP_UNZONE:
return (gettext("\tunzone <nsfile> <filesystem>\n"));
default:
__builtin_unreachable();
}
}
void
nomem(void)
{
(void) fprintf(stderr, gettext("internal error: out of memory\n"));
exit(1);
}
/*
* Utility function to guarantee allocation success (memory is zeroed via calloc()).
*/
void *
safe_malloc(size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
nomem();
return (data);
}
static void *
safe_realloc(void *data, size_t size)
{
void *newp;
if ((newp = realloc(data, size)) == NULL) {
free(data);
nomem();
}
return (newp);
}
static char *
safe_strdup(const char *str)
{
char *dupstr = strdup(str);
if (dupstr == NULL)
nomem();
return (dupstr);
}
/*
* Callback routine that will print out information for each of
* the properties.
*/
static int
usage_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
if (zfs_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, "YES ");
if (zfs_prop_inheritable(prop))
(void) fprintf(fp, " YES ");
else
(void) fprintf(fp, " NO ");
(void) fprintf(fp, "%s\n", zfs_prop_values(prop) ?: "-");
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
int i;
boolean_t show_properties = B_FALSE;
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp, gettext("\nEach dataset is of the form: "
"pool/[dataset/]*dataset[@name]\n"));
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
(strcmp(current_command->name, "set") == 0 ||
strcmp(current_command->name, "get") == 0 ||
strcmp(current_command->name, "inherit") == 0 ||
strcmp(current_command->name, "list") == 0))
show_properties = B_TRUE;
if (show_properties) {
(void) fprintf(fp, "%s",
gettext("\nThe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-14s %s %s %s\n\n",
"PROPERTY", "EDIT", "INHERIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_DATASET);
(void) fprintf(fp, "\t%-15s ", "userused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "userobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "written@<snap>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "written#<bookmark>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, gettext("\nSizes are specified in bytes "
"with standard units such as K, M, G, etc.\n"));
(void) fprintf(fp, "%s", gettext("\nUser-defined properties "
"can be specified by using a name containing a colon "
"(:).\n"));
(void) fprintf(fp, gettext("\nThe {user|group|project}"
"[obj]{used|quota}@ properties must be appended with\n"
"a user|group|project specifier of one of these forms:\n"
" POSIX name (eg: \"matt\")\n"
" POSIX id (eg: \"126829\")\n"
" SMB name@domain (eg: \"matt@sun\")\n"
" SMB SID (eg: \"S-1-234-567-89\")\n"));
} else {
(void) fprintf(fp,
gettext("\nFor the property list, run: %s\n"),
"zfs set|get");
(void) fprintf(fp,
gettext("\nFor the delegated permission list, run: %s\n"),
"zfs allow|unallow");
(void) fprintf(fp,
gettext("\nFor further help on a command or topic, "
"run: %s\n"), "zfs help [<topic>]");
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* Take a property=value argument string and add it to the given nvlist.
* Modifies the argument inplace.
*/
static boolean_t
parseprop(nvlist_t *props, char *propname)
{
char *propval;
if ((propval = strchr(propname, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for property=value argument\n"));
return (B_FALSE);
}
*propval = '\0';
propval++;
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_string(props, propname, propval) != 0)
nomem();
return (B_TRUE);
}
/*
* Take a property name argument and add it to the given nvlist.
* Modifies the argument inplace.
*/
static boolean_t
parsepropname(nvlist_t *props, char *propname)
{
if (strchr(propname, '=') != NULL) {
(void) fprintf(stderr, gettext("invalid character "
"'=' in property argument\n"));
return (B_FALSE);
}
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_boolean(props, propname) != 0)
nomem();
return (B_TRUE);
}
static int
parse_depth(char *opt, int *flags)
{
char *tmp;
int depth;
depth = (int)strtol(opt, &tmp, 0);
if (*tmp) {
(void) fprintf(stderr,
gettext("%s is not an integer\n"), optarg);
usage(B_FALSE);
}
if (depth < 0) {
(void) fprintf(stderr,
gettext("Depth can not be negative.\n"));
usage(B_FALSE);
}
*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
return (depth);
}
#define PROGRESS_DELAY 2 /* seconds */
static const char *pt_reverse =
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
static time_t pt_begin;
static char *pt_header = NULL;
static boolean_t pt_shown;
static void
start_progress_timer(void)
{
pt_begin = time(NULL) + PROGRESS_DELAY;
pt_shown = B_FALSE;
}
static void
set_progress_header(const char *header)
{
assert(pt_header == NULL);
pt_header = safe_strdup(header);
if (pt_shown) {
(void) printf("%s: ", header);
(void) fflush(stdout);
}
}
static void
update_progress(const char *update)
{
if (!pt_shown && time(NULL) > pt_begin) {
int len = strlen(update);
(void) printf("%s: %s%*.*s", pt_header, update, len, len,
pt_reverse);
(void) fflush(stdout);
pt_shown = B_TRUE;
} else if (pt_shown) {
int len = strlen(update);
(void) printf("%s%*.*s", update, len, len, pt_reverse);
(void) fflush(stdout);
}
}
static void
finish_progress(const char *done)
{
if (pt_shown) {
(void) puts(done);
(void) fflush(stdout);
}
free(pt_header);
pt_header = NULL;
}
static int
zfs_mount_and_share(libzfs_handle_t *hdl, const char *dataset, zfs_type_t type)
{
zfs_handle_t *zhp = NULL;
int ret = 0;
zhp = zfs_open(hdl, dataset, type);
if (zhp == NULL)
return (1);
/*
* Volumes may be neither mounted nor shared. Potentially, in the
* future, filesystems detected on these volumes could be mounted.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
return (0);
}
/*
* Mount and/or share the new filesystem as appropriate. We provide a
* verbose error message to let the user know that their filesystem was
* in fact created, even if we failed to mount or share it.
*
* If the user doesn't want the dataset automatically mounted, then
* skip the mount/share step
*/
if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type, B_FALSE) &&
zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON) {
if (zfs_mount_delegation_check()) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but it may only be "
"mounted by root\n"));
ret = 1;
} else if (zfs_mount(zhp, NULL, 0) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not mounted\n"));
ret = 1;
} else if (zfs_share(zhp, NULL) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not shared\n"));
ret = 1;
}
zfs_commit_shares(NULL);
}
zfs_close(zhp);
return (ret);
}
/*
* zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
*
* Given an existing dataset, create a writable copy whose initial contents
* are the same as the source. The newly created dataset maintains a
* dependency on the original; the original cannot be destroyed so long as
* the clone exists.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
static int
zfs_do_clone(int argc, char **argv)
{
zfs_handle_t *zhp = NULL;
boolean_t parents = B_FALSE;
nvlist_t *props;
int ret = 0;
int c;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "o:p")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
case 'p':
parents = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
goto usage;
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto usage;
}
/* open the source dataset */
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) {
nvlist_free(props);
return (1);
}
if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
/*
* Now create the ancestors of the target dataset. If the
* target already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
zfs_close(zhp);
nvlist_free(props);
return (0);
}
if (zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
nvlist_free(props);
return (1);
}
}
/* pass to libzfs */
ret = zfs_clone(zhp, argv[1], props);
/* create the mountpoint if necessary */
if (ret == 0) {
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
nvlist_free(props);
return (!!ret);
usage:
ASSERT3P(zhp, ==, NULL);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
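A quick usage sketch of the clone path described above (dataset names illustrative):
# Clone a snapshot, creating missing ancestors and overriding a property.
zfs snapshot tank/data@base
zfs clone -p -o mountpoint=/mnt/work tank/data@base tank/clones/work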
/*
* Return a default volblocksize for the pool which always uses more than
* half of the data sectors. This primarily applies to dRAID which always
* writes full stripe widths.
*/
static uint64_t
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
{
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
nvlist_t *config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (ZVOL_DEFAULT_BLOCKSIZE);
}
for (int i = 0; i < nvdevs; i++) {
nvlist_t *nv = vdevs[i];
uint64_t ashift, ndata, nparity;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA,
&ndata) == 0) {
/* dRAID minimum allocation width */
asize = MAX(asize, ndata * (1ULL << ashift));
} else if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
/* raidz minimum allocation width */
if (nparity == 1)
asize = MAX(asize, 2 * (1ULL << ashift));
else
asize = MAX(asize, 4 * (1ULL << ashift));
} else {
/* mirror or (non-redundant) leaf vdev */
asize = MAX(asize, 1ULL << ashift);
}
}
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
*
* n asize blksz used | n asize blksz used
* -------------------------+---------------------------------
* 1 4,096 8,192 100% | 9 36,864 32,768 88%
* 2 8,192 8,192 100% | 10 40,960 32,768 80%
* 3 12,288 8,192 66% | 11 45,056 32,768 72%
* 4 16,384 16,384 100% | 12 49,152 32,768 66%
* 5 20,480 16,384 80% | 13 53,248 32,768 61%
* 6 24,576 16,384 66% | 14 57,344 32,768 57%
* 7 28,672 16,384 57% | 15 61,440 32,768 53%
* 8 32,768 32,768 100% | 16 65,536 65,536 100%
*
* This is primarily a concern for dRAID which always allocates
* a full stripe width. For dRAID the default stripe width is
* n=8, in which case the volblocksize is set to 32k. Ignoring
* compression, there are no unused sectors. The same reasoning
* applies to raidz[2,3], so target 4 sectors to minimize waste.
*/
uint64_t tgt_volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
while (tgt_volblocksize * 2 <= asize)
tgt_volblocksize *= 2;
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
if (nvlist_lookup_uint64(props, prop, &volblocksize) == 0) {
/* Issue a warning when a non-optimal size is requested. */
if (volblocksize < ZVOL_DEFAULT_BLOCKSIZE) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is less than the default "
"minimum block size (%llu).\nTo reduce wasted "
"space a volblocksize of %llu is recommended.\n"),
(u_longlong_t)volblocksize,
(u_longlong_t)ZVOL_DEFAULT_BLOCKSIZE,
(u_longlong_t)tgt_volblocksize);
} else if (volblocksize < tgt_volblocksize) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is much less than the "
"minimum allocation\nunit (%llu), which wastes "
"at least %llu%% of space. To reduce wasted "
"space,\nuse a larger volblocksize (%llu is "
"recommended), fewer dRAID data disks\n"
"per group, or smaller sector size (ashift).\n"),
(u_longlong_t)volblocksize, (u_longlong_t)asize,
(u_longlong_t)((100 * (asize - volblocksize)) /
asize), (u_longlong_t)tgt_volblocksize);
}
} else {
volblocksize = tgt_volblocksize;
fnvlist_add_uint64(props, prop, volblocksize);
}
return (volblocksize);
}
/*
* zfs create [-Pnpuv] [-o prop=value] ... fs
* zfs create [-Pnpsv] [-b blocksize] [-o prop=value] ... -V vol size
*
* Create a new dataset. This command can be used to create filesystems
* and volumes. Snapshot creation is handled by 'zfs snapshot'.
* For volumes, the user must specify a size to be used.
*
* The '-s' flag applies only to volumes, and indicates that we should not try
* to set the reservation for this volume. By default we set a reservation
* equal to the size for any volume. For pools with SPA_VERSION >=
* SPA_VERSION_REFRESERVATION, we set a refreservation instead.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*
* The '-n' flag enables dry-run (no-op) mode. This performs a user-space sanity
* check of arguments and properties, but does not check for permissions,
* available space, etc.
*
* The '-u' flag prevents the newly created file system from being mounted.
*
* The '-v' flag is for verbose output.
*
* The '-P' flag is used for parseable output. It implies '-v'.
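*
* Example invocations (illustrative, with a hypothetical pool 'tank'):
*     zfs create -p tank/home/user
*     zfs create -s -V 10G tank/vol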
*/
static int
zfs_do_create(int argc, char **argv)
{
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
zpool_handle_t *zpool_handle = NULL;
nvlist_t *real_props = NULL;
uint64_t volsize = 0;
int c;
boolean_t noreserve = B_FALSE;
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t nomount = B_FALSE;
boolean_t verbose = B_FALSE;
boolean_t parseable = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
const char *strval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":PV:b:nso:puv")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
nomem();
volsize = intval;
break;
case 'P':
verbose = B_TRUE;
parseable = B_TRUE;
break;
case 'p':
parents = B_TRUE;
break;
case 'b':
bflag = B_TRUE;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"block size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
intval) != 0)
nomem();
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg))
goto error;
break;
case 's':
noreserve = B_TRUE;
break;
case 'u':
nomount = B_TRUE;
break;
case 'v':
verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing size "
"argument\n"));
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
"used when creating a volume\n"));
goto badusage;
}
if (nomount && type != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("'-u' can only be "
"used when creating a filesystem\n"));
goto badusage;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing %s argument\n"),
zfs_type_to_name(type));
goto badusage;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto badusage;
}
if (dryrun || type == ZFS_TYPE_VOLUME) {
char msg[ZFS_MAX_DATASET_NAME_LEN * 2];
char *p;
if ((p = strchr(argv[0], '/')) != NULL)
*p = '\0';
zpool_handle = zpool_open(g_zfs, argv[0]);
if (p != NULL)
*p = '/';
if (zpool_handle == NULL)
goto error;
(void) snprintf(msg, sizeof (msg),
dryrun ? gettext("cannot verify '%s'") :
gettext("cannot create '%s'"), argv[0]);
if (props && (real_props = zfs_valid_proplist(g_zfs, type,
props, 0, NULL, zpool_handle, B_TRUE, msg)) == NULL) {
zpool_close(zpool_handle);
goto error;
}
}
if (type == ZFS_TYPE_VOLUME) {
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
uint64_t volblocksize = default_volblocksize(zpool_handle,
real_props);
if (volblocksize != ZVOL_DEFAULT_BLOCKSIZE &&
nvlist_lookup_string(props, prop, &strval) != 0) {
char *tmp;
if (asprintf(&tmp, "%llu",
(u_longlong_t)volblocksize) == -1)
nomem();
nvlist_add_string(props, prop, tmp);
free(tmp);
}
/*
* If volsize is not a multiple of volblocksize, round it
* up to the nearest multiple of the volblocksize.
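*
* For example (illustrative), a requested volsize of 100,000 bytes
* with a 16k volblocksize is rounded up to 7 blocks, i.e. 114,688
* bytes.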
*/
if (volsize % volblocksize) {
volsize = P2ROUNDUP_TYPED(volsize, volblocksize,
uint64_t);
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (type == ZFS_TYPE_VOLUME && !noreserve) {
uint64_t spa_version;
zfs_prop_t resv_prop;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
if (spa_version >= SPA_VERSION_REFRESERVATION)
resv_prop = ZFS_PROP_REFRESERVATION;
else
resv_prop = ZFS_PROP_RESERVATION;
volsize = zvol_volsize_to_reservation(zpool_handle, volsize,
real_props);
if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
&strval) != 0) {
if (nvlist_add_uint64(props,
zfs_prop_to_name(resv_prop), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (zpool_handle != NULL) {
zpool_close(zpool_handle);
nvlist_free(real_props);
}
if (parents && zfs_name_valid(argv[0], type)) {
/*
* Now create the ancestors of the target dataset. If the target
* already exists and the '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[0], type)) {
ret = 0;
goto error;
}
if (verbose) {
(void) printf(parseable ? "create_ancestors\t%s\n" :
dryrun ? "would create ancestors of %s\n" :
"create ancestors of %s\n", argv[0]);
}
if (!dryrun) {
if (zfs_create_ancestors(g_zfs, argv[0]) != 0) {
goto error;
}
}
}
if (verbose) {
nvpair_t *nvp = NULL;
(void) printf(parseable ? "create\t%s\n" :
dryrun ? "would create %s\n" : "create %s\n", argv[0]);
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
uint64_t uval;
const char *sval;
switch (nvpair_type(nvp)) {
case DATA_TYPE_UINT64:
VERIFY0(nvpair_value_uint64(nvp, &uval));
(void) printf(parseable ?
"property\t%s\t%llu\n" : "\t%s=%llu\n",
nvpair_name(nvp), (u_longlong_t)uval);
break;
case DATA_TYPE_STRING:
VERIFY0(nvpair_value_string(nvp, &sval));
(void) printf(parseable ?
"property\t%s\t%s\n" : "\t%s=%s\n",
nvpair_name(nvp), sval);
break;
default:
(void) fprintf(stderr, "property '%s' "
"has illegal type %d\n",
nvpair_name(nvp), nvpair_type(nvp));
abort();
}
}
}
if (dryrun) {
ret = 0;
goto error;
}
/* pass to libzfs */
if (zfs_create(g_zfs, argv[0], type, props) != 0)
goto error;
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (nomount) {
ret = 0;
goto error;
}
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
badusage:
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zfs destroy [-rRf] <fs, vol>
* zfs destroy [-rRd] <snap>
*
* -r Recursively destroy all children
* -R Recursively destroy all dependents, including clones
* -f Force unmounting of any dependents
* -d If we can't destroy now, mark for deferred destruction
*
* Destroys the given dataset. By default, it will unmount any filesystems,
* and refuse to destroy a dataset that has any dependents. A dependent can
* either be a child, or a clone of a child.
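*
* Example invocations (illustrative, with a hypothetical pool 'tank'):
*     zfs destroy -r tank/home/user
*     zfs destroy -nv tank/fs@snap1%snap4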
*/
typedef struct destroy_cbdata {
boolean_t cb_first;
boolean_t cb_force;
boolean_t cb_recurse;
boolean_t cb_error;
boolean_t cb_doclones;
zfs_handle_t *cb_target;
boolean_t cb_defer_destroy;
boolean_t cb_verbose;
boolean_t cb_parsable;
boolean_t cb_dryrun;
nvlist_t *cb_nvl;
nvlist_t *cb_batchedsnaps;
/* first snap in contiguous run */
char *cb_firstsnap;
/* previous snap in contiguous run */
char *cb_prevsnap;
int64_t cb_snapused;
char *cb_snapspec;
char *cb_bookmark;
uint64_t cb_snap_count;
} destroy_cbdata_t;
/*
* Check for any dependents based on the '-r' or '-R' flags.
*/
static int
destroy_check_dependent(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cbp = data;
const char *tname = zfs_get_name(cbp->cb_target);
const char *name = zfs_get_name(zhp);
if (strncmp(tname, name, strlen(tname)) == 0 &&
(name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
/*
* This is a direct descendant, not a clone somewhere else in
* the hierarchy.
*/
if (cbp->cb_recurse)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has children\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-r' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
} else {
/*
* This is a clone. We only want to report this if the '-r'
* wasn't specified, or the target is a snapshot.
*/
if (!cbp->cb_recurse &&
zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has dependent clones\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-R' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
cbp->cb_dryrun = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
}
out:
zfs_close(zhp);
return (0);
}
static int
destroy_batched(destroy_cbdata_t *cb)
{
int error = zfs_destroy_snaps_nvl(g_zfs,
cb->cb_batchedsnaps, B_FALSE);
fnvlist_free(cb->cb_batchedsnaps);
cb->cb_batchedsnaps = fnvlist_alloc();
return (error);
}
static int
destroy_callback(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cb = data;
const char *name = zfs_get_name(zhp);
int error;
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
/*
* Ignore pools (which we've already flagged as an error before getting
* here).
*/
if (strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (cb->cb_dryrun) {
zfs_close(zhp);
return (0);
}
/*
* We batch up all contiguous snapshots (even of different
* filesystems) and destroy them with one ioctl. We can't
* simply do all snap deletions and then all fs deletions,
* because we must delete a clone before its origin.
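*
* For example (illustrative): if fs2 is a clone of fs1@s1, deleting
* all snapshots up front would try to destroy fs1@s1 while fs2 still
* exists; flushing the batch whenever a filesystem is reached
* preserves the dependency order.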
*/
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
cb->cb_snap_count++;
fnvlist_add_boolean(cb->cb_batchedsnaps, name);
if (cb->cb_snap_count % 10 == 0 && cb->cb_defer_destroy) {
error = destroy_batched(cb);
if (error != 0) {
zfs_close(zhp);
return (-1);
}
}
} else {
error = destroy_batched(cb);
if (error != 0 ||
zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
zfs_close(zhp);
/*
* When performing a recursive destroy we ignore errors
* so that the recursive destroy can continue
* destroying past problem datasets.
*/
if (cb->cb_recurse) {
cb->cb_error = B_TRUE;
return (0);
}
return (-1);
}
}
zfs_close(zhp);
return (0);
}
static int
destroy_print_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
const char *name = zfs_get_name(zhp);
int err = 0;
if (nvlist_exists(cb->cb_nvl, name)) {
if (cb->cb_firstsnap == NULL)
cb->cb_firstsnap = strdup(name);
if (cb->cb_prevsnap != NULL)
free(cb->cb_prevsnap);
/* this snap continues the current range */
cb->cb_prevsnap = strdup(name);
if (cb->cb_firstsnap == NULL || cb->cb_prevsnap == NULL)
nomem();
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
} else if (cb->cb_firstsnap != NULL) {
/* end of this range */
uint64_t used = 0;
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
zfs_close(zhp);
return (err);
}
static int
destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
{
int err;
assert(cb->cb_firstsnap == NULL);
assert(cb->cb_prevsnap == NULL);
err = zfs_iter_snapshots_sorted_v2(fs_zhp, 0, destroy_print_cb, cb, 0,
0);
if (cb->cb_firstsnap != NULL) {
uint64_t used = 0;
if (err == 0) {
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
}
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
return (err);
}
static int
snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
/* Check for clones. */
if (!cb->cb_doclones && !cb->cb_defer_destroy) {
cb->cb_target = zhp;
cb->cb_first = B_TRUE;
err = zfs_iter_dependents_v2(zhp, 0, B_TRUE,
destroy_check_dependent, cb);
}
if (err == 0) {
if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
nomem();
}
zfs_close(zhp);
return (err);
}
static int
gather_snapshots(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
err = zfs_iter_snapspec_v2(zhp, 0, cb->cb_snapspec,
snapshot_to_nvl_cb, cb);
if (err == ENOENT)
err = 0;
if (err != 0)
goto out;
if (cb->cb_verbose) {
err = destroy_print_snapshots(zhp, cb);
if (err != 0)
goto out;
}
if (cb->cb_recurse)
err = zfs_iter_filesystems_v2(zhp, 0, gather_snapshots, cb);
out:
zfs_close(zhp);
return (err);
}
static int
destroy_clones(destroy_cbdata_t *cb)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
pair != NULL;
pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
boolean_t defer = cb->cb_defer_destroy;
int err;
/*
* We can't defer the destruction of non-snapshots, so
* temporarily clear cb_defer_destroy while destroying the clones.
*/
cb->cb_defer_destroy = B_FALSE;
err = zfs_iter_dependents_v2(zhp, 0, B_FALSE,
destroy_callback, cb);
cb->cb_defer_destroy = defer;
zfs_close(zhp);
if (err != 0)
return (err);
}
}
return (0);
}
static int
zfs_do_destroy(int argc, char **argv)
{
destroy_cbdata_t cb = { 0 };
int rv = 0;
int err = 0;
int c;
zfs_handle_t *zhp = NULL;
char *at, *pound;
zfs_type_t type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
switch (c) {
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'p':
cb.cb_verbose = B_TRUE;
cb.cb_parsable = B_TRUE;
break;
case 'n':
cb.cb_dryrun = B_TRUE;
break;
case 'd':
cb.cb_defer_destroy = B_TRUE;
type = ZFS_TYPE_SNAPSHOT;
break;
case 'f':
cb.cb_force = B_TRUE;
break;
case 'r':
cb.cb_recurse = B_TRUE;
break;
case 'R':
cb.cb_recurse = B_TRUE;
cb.cb_doclones = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
at = strchr(argv[0], '@');
pound = strchr(argv[0], '#');
if (at != NULL) {
/* Build the list of snaps to destroy in cb_nvl. */
cb.cb_nvl = fnvlist_alloc();
*at = '\0';
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(cb.cb_nvl);
return (1);
}
cb.cb_snapspec = at + 1;
if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
cb.cb_error) {
rv = 1;
goto out;
}
if (nvlist_empty(cb.cb_nvl)) {
(void) fprintf(stderr, gettext("could not find any "
"snapshots to destroy; check snapshot names.\n"));
rv = 1;
goto out;
}
if (cb.cb_verbose) {
char buf[16];
zfs_nicebytes(cb.cb_snapused, buf, sizeof (buf));
if (cb.cb_parsable) {
(void) printf("reclaim\t%llu\n",
(u_longlong_t)cb.cb_snapused);
} else if (cb.cb_dryrun) {
(void) printf(gettext("would reclaim %s\n"),
buf);
} else {
(void) printf(gettext("will reclaim %s\n"),
buf);
}
}
if (!cb.cb_dryrun) {
if (cb.cb_doclones) {
cb.cb_batchedsnaps = fnvlist_alloc();
err = destroy_clones(&cb);
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, B_FALSE);
}
if (err != 0) {
rv = 1;
goto out;
}
}
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs, cb.cb_nvl,
cb.cb_defer_destroy);
}
}
if (err != 0)
rv = 1;
} else if (pound != NULL) {
int err;
nvlist_t *nvl;
if (cb.cb_dryrun) {
(void) fprintf(stderr,
"dryrun is not supported with bookmark\n");
return (-1);
}
if (cb.cb_defer_destroy) {
(void) fprintf(stderr,
"defer destroy is not supported with bookmark\n");
return (-1);
}
if (cb.cb_recurse) {
(void) fprintf(stderr,
"recursive is not supported with bookmark\n");
return (-1);
}
/*
* Unfortunately, zfs_bookmark() doesn't honor the
* casesensitivity setting. However, we can't simply
* remove this check, because lzc_destroy_bookmarks()
* ignores non-existent bookmarks, so this is necessary
* to get a proper error message.
*/
if (!zfs_bookmark_exists(argv[0])) {
(void) fprintf(stderr, gettext("bookmark '%s' "
"does not exist.\n"), argv[0]);
return (1);
}
nvl = fnvlist_alloc();
fnvlist_add_boolean(nvl, argv[0]);
err = lzc_destroy_bookmarks(nvl, NULL);
if (err != 0) {
(void) zfs_standard_error(g_zfs, err,
"cannot destroy bookmark");
}
nvlist_free(nvl);
return (err);
} else {
/* Open the given dataset */
if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
return (1);
cb.cb_target = zhp;
/*
* Perform an explicit check for pools before going any further.
*/
if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"operation does not apply to pools\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zfs destroy -r "
"%s' to destroy all datasets in the pool\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
"to destroy the pool itself\n"), zfs_get_name(zhp));
rv = 1;
goto out;
}
/*
* Check for any dependents and/or clones.
*/
cb.cb_first = B_TRUE;
if (!cb.cb_doclones && zfs_iter_dependents_v2(zhp, 0, B_TRUE,
destroy_check_dependent, &cb) != 0) {
rv = 1;
goto out;
}
if (cb.cb_error) {
rv = 1;
goto out;
}
cb.cb_batchedsnaps = fnvlist_alloc();
if (zfs_iter_dependents_v2(zhp, 0, B_FALSE, destroy_callback,
&cb) != 0) {
rv = 1;
goto out;
}
/*
* Do the real thing. The callback will close the
* handle regardless of whether it succeeds or not.
*/
err = destroy_callback(zhp, &cb);
zhp = NULL;
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, cb.cb_defer_destroy);
}
if (err != 0 || cb.cb_error == B_TRUE)
rv = 1;
}
out:
fnvlist_free(cb.cb_batchedsnaps);
fnvlist_free(cb.cb_nvl);
if (zhp != NULL)
zfs_close(zhp);
return (rv);
}
static boolean_t
is_recvd_column(zprop_get_cbdata_t *cbp)
{
int i;
zfs_get_column_t col;
for (i = 0; i < ZFS_GET_NCOLS &&
(col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
if (col == GET_COL_RECVD)
return (B_TRUE);
return (B_FALSE);
}
/*
* zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
* < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
* -H scripted mode. Headers are stripped, and fields are separated
* by tabs instead of spaces.
* -o Set of fields to display. One of "name,property,value,
* received,source". Default is "name,property,value,source".
* "all" is an alias for all five.
* -s Set of sources to allow. One of
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -p Display values in parsable (literal) format.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
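*
* Example (illustrative): 'zfs get -r -o name,value compression tank'
* prints just the name and value columns for a hypothetical pool 'tank'
* and all of its descendants.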
*/
/*
* Invoked to display the properties for a single dataset.
*/
static int
get_callback(zfs_handle_t *zhp, void *data)
{
char buf[ZFS_MAXPROPLEN];
char rbuf[ZFS_MAXPROPLEN];
zprop_source_t sourcetype;
char source[ZFS_MAX_DATASET_NAME_LEN];
zprop_get_cbdata_t *cbp = data;
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
const char *strval;
const char *sourceval;
boolean_t received = is_recvd_column(cbp);
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
/*
* Skip the special fake placeholder. This will also skip over
* the name property when 'all' is specified.
*/
if (pl->pl_prop == ZFS_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, pl->pl_prop, buf,
sizeof (buf), &sourcetype, source,
sizeof (source),
cbp->cb_literal) != 0) {
if (pl->pl_all)
continue;
if (!zfs_prop_valid_for_type(pl->pl_prop,
ZFS_TYPE_DATASET, B_FALSE)) {
(void) fprintf(stderr,
gettext("No such property '%s'\n"),
zfs_prop_to_name(pl->pl_prop));
continue;
}
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
if (received && (zfs_prop_get_recvd(zhp,
zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
if (pl->pl_all)
continue;
sourcetype = ZPROP_SRC_NONE;
strval = "-";
} else {
strval = fnvlist_lookup_string(propval,
ZPROP_VALUE);
sourceval = fnvlist_lookup_string(propval,
ZPROP_SOURCE);
if (strcmp(sourceval,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strlcpy(source,
sourceval, sizeof (source));
}
}
if (received && (zfs_prop_get_recvd(zhp,
pl->pl_user_prop, rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
}
}
return (0);
}
static int
zfs_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
int types = ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK;
char *fields;
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'o':
/*
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_subopts[] =
{ "name", "property", "value",
"received", "source", "all" };
static const zfs_get_column_t col_subopt_col[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_RECVD, GET_COL_SOURCE };
static const int col_subopt_flags[] =
{ 0, 0, 0, ZFS_ITER_RECVD_PROPS, 0 };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_subopts); ++c)
if (strcmp(tok, col_subopts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 5) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_subopt_col,
sizeof (col_subopt_col));
flags |= ZFS_ITER_RECVD_PROPS;
i = ZFS_GET_NCOLS;
} else {
cb.cb_columns[i++] = col_subopt_col[c];
flags |= col_subopt_flags[c];
}
}
break;
case 's':
cb.cb_sources = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const source_opt[] = {
"local", "default",
"inherited", "received",
"temporary", "none" };
static const int source_flg[] = {
ZPROP_SRC_LOCAL, ZPROP_SRC_DEFAULT,
ZPROP_SRC_INHERITED, ZPROP_SRC_RECEIVED,
ZPROP_SRC_TEMPORARY, ZPROP_SRC_NONE };
for (i = 0; i < ARRAY_SIZE(source_opt); ++i)
if (strcmp(tok, source_opt[i]) == 0) {
cb.cb_sources |= source_flg[i];
goto found2;
}
(void) fprintf(stderr,
gettext("invalid source '%s'\n"), tok);
usage(B_FALSE);
found2:;
}
break;
case 't':
types = 0;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const type_opts[] = {
"filesystem", "volume",
"snapshot", "snap",
"bookmark",
"all" };
static const int type_types[] = {
ZFS_TYPE_FILESYSTEM, ZFS_TYPE_VOLUME,
ZFS_TYPE_SNAPSHOT, ZFS_TYPE_SNAPSHOT,
ZFS_TYPE_BOOKMARK,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK };
for (i = 0; i < ARRAY_SIZE(type_opts); ++i)
if (strcmp(tok, type_opts[i]) == 0) {
types |= type_types[i];
goto found3;
}
(void) fprintf(stderr,
gettext("invalid type '%s'\n"), tok);
usage(B_FALSE);
found3:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
fields = argv[0];
/*
* Handle users who want to get all snapshots or bookmarks
* of a dataset (ex. 'zfs get -t snapshot refer <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 1 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
argc--;
argv++;
/*
* As part of zfs_expand_proplist(), we keep track of the maximum column
* width for each property. For the 'NAME' (and 'SOURCE') columns, we
* need to know the maximum name length. However, the user likely did
* not specify 'name' as one of the properties to fetch, so we need to
* make sure we always include at least this property for
* print_get_headers() to work properly.
*/
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZFS_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
cb.cb_first = B_TRUE;
/* run for each object */
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
/*
* inherit [-rS] <property> <fs|vol> ...
*
* -r Recurse over all children
* -S Revert to received value, if any
*
* For each dataset specified on the command line, inherit the given property
* from its parent. Inheriting a property at the pool level will cause it to
* use the default value. The '-r' flag will recurse over all children, and is
* useful for setting a property on a hierarchy-wide basis, regardless of any
* local modifications for each dataset.
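*
* Example (illustrative): 'zfs inherit -r compression tank/home'
* clears any locally set compression values throughout a hypothetical
* tank/home subtree.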
*/
typedef struct inherit_cbdata {
const char *cb_propname;
boolean_t cb_received;
} inherit_cbdata_t;
static int
inherit_recurse_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
/*
* If we're doing it recursively, then ignore properties that
* are not valid for this type of dataset.
*/
if (prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, zfs_get_type(zhp), B_FALSE))
return (0);
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
inherit_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
zfs_do_inherit(int argc, char **argv)
{
int c;
zfs_prop_t prop;
inherit_cbdata_t cb = { 0 };
char *propname;
int ret = 0;
int flags = 0;
boolean_t received = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "rS")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'S':
received = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
propname = argv[0];
argc--;
argv++;
if ((prop = zfs_name_to_prop(propname)) != ZPROP_USERPROP) {
if (zfs_prop_readonly(prop)) {
(void) fprintf(stderr, gettext(
"%s property is read-only\n"),
propname);
return (1);
}
if (!zfs_prop_inheritable(prop) && !received) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be inherited\n"), propname);
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||
prop == ZFS_PROP_VERSION)) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be reverted to a received value\n"), propname);
return (1);
}
} else if (!zfs_prop_user(propname)) {
(void) fprintf(stderr, gettext("invalid property '%s'\n"),
propname);
usage(B_FALSE);
}
cb.cb_propname = propname;
cb.cb_received = received;
if (flags & ZFS_ITER_RECURSE) {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_recurse_cb, &cb);
} else {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_cb, &cb);
}
return (ret);
}
typedef struct upgrade_cbdata {
uint64_t cb_numupgraded;
uint64_t cb_numsamegraded;
uint64_t cb_numfailed;
uint64_t cb_version;
boolean_t cb_newer;
boolean_t cb_foundone;
char cb_lastfs[ZFS_MAX_DATASET_NAME_LEN];
} upgrade_cbdata_t;
static int
same_pool(zfs_handle_t *zhp, const char *name)
{
int len1 = strcspn(name, "/@");
const char *zhname = zfs_get_name(zhp);
int len2 = strcspn(zhname, "/@");
if (len1 != len2)
return (B_FALSE);
return (strncmp(name, zhname, len1) == 0);
}
static int
upgrade_list_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
/* list if it's old/new */
if ((!cb->cb_newer && version < ZPL_VERSION) ||
(cb->cb_newer && version > ZPL_VERSION)) {
char *str;
if (cb->cb_newer) {
str = gettext("The following filesystems are "
"formatted using a newer software version and\n"
"cannot be accessed on the current system.\n\n");
} else {
str = gettext("The following filesystems are "
"out of date, and can be upgraded. After being\n"
"upgraded, these filesystems (and any 'zfs send' "
"streams generated from\n"
"subsequent snapshots) will no longer be "
"accessible by older software versions.\n\n");
}
if (!cb->cb_foundone) {
(void) puts(str);
(void) printf(gettext("VER FILESYSTEM\n"));
(void) printf(gettext("--- ------------\n"));
cb->cb_foundone = B_TRUE;
}
(void) printf("%2u %s\n", version, zfs_get_name(zhp));
}
return (0);
}
static int
upgrade_set_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int needed_spa_version;
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
needed_spa_version = zfs_spa_version_map(cb->cb_version);
if (needed_spa_version < 0)
return (-1);
if (spa_version < needed_spa_version) {
/* can't upgrade */
(void) printf(gettext("%s: can not be "
"upgraded; the pool version needs to first "
"be upgraded\nto version %d\n\n"),
zfs_get_name(zhp), needed_spa_version);
cb->cb_numfailed++;
return (0);
}
/* upgrade */
if (version < cb->cb_version) {
char verstr[24];
(void) snprintf(verstr, sizeof (verstr),
"%llu", (u_longlong_t)cb->cb_version);
if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
/*
* If they did "zfs upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (zfs_prop_set(zhp, "version", verstr) == 0)
cb->cb_numupgraded++;
else
cb->cb_numfailed++;
(void) strlcpy(cb->cb_lastfs, zfs_get_name(zhp),
sizeof (cb->cb_lastfs));
} else if (version > cb->cb_version) {
/* can't downgrade */
(void) printf(gettext("%s: can not be downgraded; "
"it is already at version %u\n"),
zfs_get_name(zhp), version);
cb->cb_numfailed++;
} else {
cb->cb_numsamegraded++;
}
return (0);
}
/*
* zfs upgrade
* zfs upgrade -v
* zfs upgrade [-r] [-V <version>] <-a | filesystem>
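*
* Example (illustrative): 'zfs upgrade -a' upgrades every filesystem
* to the current ZPL_VERSION, while 'zfs upgrade -V 4 tank/fs'
* upgrades a single (hypothetical) filesystem to version 4.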
*/
static int
zfs_do_upgrade(int argc, char **argv)
{
boolean_t all = B_FALSE;
boolean_t showversions = B_FALSE;
int ret = 0;
upgrade_cbdata_t cb = { 0 };
int c;
int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "rvV:a")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
optarg, &cb.cb_version) != 0) {
(void) fprintf(stderr,
gettext("invalid version %s\n"), optarg);
usage(B_FALSE);
}
break;
case 'a':
all = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version))
usage(B_FALSE);
if (showversions && (flags & ZFS_ITER_RECURSE || all ||
cb.cb_version || argc))
usage(B_FALSE);
if ((all || argc) && (showversions))
usage(B_FALSE);
if (all && argc)
usage(B_FALSE);
if (showversions) {
/* Show info on available versions. */
(void) printf(gettext("The following filesystem versions are "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS filesystem version\n"));
(void) printf(gettext(" 2 Enhanced directory entries\n"));
(void) printf(gettext(" 3 Case insensitive and filesystem "
"user identifier (FUID)\n"));
(void) printf(gettext(" 4 userquota, groupquota "
"properties\n"));
(void) printf(gettext(" 5 System attributes\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf("see the ZFS Administration Guide.\n\n");
ret = 0;
} else if (argc || all) {
/* Upgrade filesystems */
if (cb.cb_version == 0)
cb.cb_version = ZPL_VERSION;
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_set_callback, &cb);
(void) printf(gettext("%llu filesystems upgraded\n"),
(u_longlong_t)cb.cb_numupgraded);
if (cb.cb_numsamegraded) {
(void) printf(gettext("%llu filesystems already at "
"this version\n"),
(u_longlong_t)cb.cb_numsamegraded);
}
if (cb.cb_numfailed != 0)
ret = 1;
} else {
/* List old-version filesystems */
boolean_t found;
(void) printf(gettext("This system is currently running "
"ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
flags |= ZFS_ITER_RECURSE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
found = cb.cb_foundone;
cb.cb_foundone = B_FALSE;
cb.cb_newer = B_TRUE;
ret |= zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
if (!cb.cb_foundone && !found) {
(void) printf(gettext("All filesystems are "
"formatted with the current version.\n"));
}
}
return (ret);
}
/*
* zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs projectspace [-Hp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] filesystem | snapshot | path
*
* -H Scripted mode; elide headers and separate columns by tabs.
* -i Translate SID to POSIX ID.
* -n Print numeric ID instead of user/group name.
* -o Control which fields to display.
* -p Use exact (parsable) numeric output.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* Displays space consumed by, and quotas on, each user in the specified
* filesystem or snapshot.
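*
* Example (illustrative): 'zfs userspace -o name,used -s used tank/home'
* prints per-user consumption for a hypothetical tank/home, sorted by
* the USED column.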
*/
/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
enum us_field_types {
USFIELD_TYPE,
USFIELD_NAME,
USFIELD_USED,
USFIELD_QUOTA,
USFIELD_OBJUSED,
USFIELD_OBJQUOTA
};
static const char *const us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA",
"OBJUSED", "OBJQUOTA" };
static const char *const us_field_names[] = { "type", "name", "used", "quota",
"objused", "objquota" };
#define USFIELD_LAST (sizeof (us_field_names) / sizeof (char *))
#define USTYPE_PSX_GRP (1 << 0)
#define USTYPE_PSX_USR (1 << 1)
#define USTYPE_SMB_GRP (1 << 2)
#define USTYPE_SMB_USR (1 << 3)
#define USTYPE_PROJ (1 << 4)
#define USTYPE_ALL \
(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR | \
USTYPE_PROJ)
static int us_type_bits[] = {
USTYPE_PSX_GRP,
USTYPE_PSX_USR,
USTYPE_SMB_GRP,
USTYPE_SMB_USR,
USTYPE_ALL
};
static const char *const us_type_names[] = { "posixgroup", "posixuser",
"smbgroup", "smbuser", "all" };
typedef struct us_node {
nvlist_t *usn_nvl;
uu_avl_node_t usn_avlnode;
uu_list_node_t usn_listnode;
} us_node_t;
typedef struct us_cbdata {
nvlist_t **cb_nvlp;
uu_avl_pool_t *cb_avl_pool;
uu_avl_t *cb_avl;
boolean_t cb_numname;
boolean_t cb_nicenum;
boolean_t cb_sid2posix;
zfs_userquota_prop_t cb_prop;
zfs_sort_column_t *cb_sortcol;
size_t cb_width[USFIELD_LAST];
} us_cbdata_t;
static boolean_t us_populated = B_FALSE;
typedef struct {
zfs_sort_column_t *si_sortcol;
boolean_t si_numname;
} us_sort_info_t;
static int
us_field_index(const char *field)
{
for (int i = 0; i < USFIELD_LAST; i++) {
if (strcmp(field, us_field_names[i]) == 0)
return (i);
}
return (-1);
}
static int
us_compare(const void *larg, const void *rarg, void *unused)
{
const us_node_t *l = larg;
const us_node_t *r = rarg;
us_sort_info_t *si = (us_sort_info_t *)unused;
zfs_sort_column_t *sortcol = si->si_sortcol;
boolean_t numname = si->si_numname;
nvlist_t *lnvl = l->usn_nvl;
nvlist_t *rnvl = r->usn_nvl;
int rc = 0;
boolean_t lvb, rvb;
for (; sortcol != NULL; sortcol = sortcol->sc_next) {
const char *lvstr = "";
const char *rvstr = "";
uint32_t lv32 = 0;
uint32_t rv32 = 0;
uint64_t lv64 = 0;
uint64_t rv64 = 0;
zfs_prop_t prop = sortcol->sc_prop;
const char *propname = NULL;
boolean_t reverse = sortcol->sc_reverse;
switch (prop) {
case ZFS_PROP_TYPE:
propname = "type";
(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
if (rv32 != lv32)
rc = (rv32 < lv32) ? 1 : -1;
break;
case ZFS_PROP_NAME:
propname = "name";
if (numname) {
compare_nums:
(void) nvlist_lookup_uint64(lnvl, propname,
&lv64);
(void) nvlist_lookup_uint64(rnvl, propname,
&rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
}
break;
case ZFS_PROP_USED:
case ZFS_PROP_QUOTA:
if (!us_populated)
break;
if (prop == ZFS_PROP_USED)
propname = "used";
else
propname = "quota";
(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
break;
default:
break;
}
if (rc != 0) {
if (rc < 0)
return (reverse ? 1 : -1);
else
return (reverse ? -1 : 1);
}
}
/*
* If entries still seem to be the same, check if they are of the same
* type (smbentity is added only if we are doing SID to POSIX ID
* translation where we can have duplicate type/name combinations).
*/
if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
lvb != rvb)
return (lvb < rvb ? -1 : 1);
return (0);
}
static boolean_t
zfs_prop_is_user(unsigned p)
{
return (p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA ||
p == ZFS_PROP_USEROBJUSED || p == ZFS_PROP_USEROBJQUOTA);
}
static boolean_t
zfs_prop_is_group(unsigned p)
{
return (p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA ||
p == ZFS_PROP_GROUPOBJUSED || p == ZFS_PROP_GROUPOBJQUOTA);
}
static boolean_t
zfs_prop_is_project(unsigned p)
{
return (p == ZFS_PROP_PROJECTUSED || p == ZFS_PROP_PROJECTQUOTA ||
p == ZFS_PROP_PROJECTOBJUSED || p == ZFS_PROP_PROJECTOBJQUOTA);
}
static inline const char *
us_type2str(unsigned field_type)
{
switch (field_type) {
case USTYPE_PSX_USR:
return ("POSIX User");
case USTYPE_PSX_GRP:
return ("POSIX Group");
case USTYPE_SMB_USR:
return ("SMB User");
case USTYPE_SMB_GRP:
return ("SMB Group");
case USTYPE_PROJ:
return ("Project");
default:
return ("Undefined");
}
}
static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;
char *name = NULL;
const char *propname;
char sizebuf[32];
us_node_t *node;
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
uu_avl_t *avl = cb->cb_avl;
uu_avl_index_t idx;
nvlist_t *props;
us_node_t *n;
zfs_sort_column_t *sortcol = cb->cb_sortcol;
unsigned type = 0;
const char *typestr;
size_t namelen;
size_t typelen;
size_t sizelen;
int typeidx, nameidx, sizeidx;
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
boolean_t smbentity = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
node = safe_malloc(sizeof (us_node_t));
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
node->usn_nvl = props;
if (domain != NULL && domain[0] != '\0') {
#ifdef HAVE_IDMAP
/* SMB */
char sid[MAXNAMELEN + 32];
uid_t id;
uint64_t classes;
int err;
directory_error_t e;
smbentity = B_TRUE;
(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
type = USTYPE_SMB_GRP;
err = sid_to_id(sid, B_FALSE, &id);
} else {
type = USTYPE_SMB_USR;
err = sid_to_id(sid, B_TRUE, &id);
}
if (err == 0) {
rid = id;
if (!cb->cb_sid2posix) {
e = directory_name_from_sid(NULL, sid, &name,
&classes);
if (e != NULL)
directory_error_free(e);
if (name == NULL)
name = sid;
}
}
#else
nvlist_free(props);
free(node);
return (-1);
#endif /* HAVE_IDMAP */
}
if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
/* POSIX or -i */
if (zfs_prop_is_group(prop)) {
type = USTYPE_PSX_GRP;
if (!cb->cb_numname) {
struct group *g;
if ((g = getgrgid(rid)) != NULL)
name = g->gr_name;
}
} else if (zfs_prop_is_user(prop)) {
type = USTYPE_PSX_USR;
if (!cb->cb_numname) {
struct passwd *p;
if ((p = getpwuid(rid)) != NULL)
name = p->pw_name;
}
} else {
type = USTYPE_PROJ;
}
}
/*
* Make sure that the type/name combination is unique when doing
* SID to POSIX ID translation (hence changing the type from SMB to
* POSIX).
*/
if (cb->cb_sid2posix &&
nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
nomem();
/* Calculate/update width of TYPE field */
typestr = us_type2str(type);
typelen = strlen(gettext(typestr));
typeidx = us_field_index("type");
if (typelen > cb->cb_width[typeidx])
cb->cb_width[typeidx] = typelen;
if (nvlist_add_uint32(props, "type", type) != 0)
nomem();
/* Calculate/update width of NAME field */
if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
if (nvlist_add_uint64(props, "name", rid) != 0)
nomem();
namelen = snprintf(NULL, 0, "%u", rid);
} else {
if (nvlist_add_string(props, "name", name) != 0)
nomem();
namelen = strlen(name);
}
nameidx = us_field_index("name");
if (nameidx >= 0 && namelen > cb->cb_width[nameidx])
cb->cb_width[nameidx] = namelen;
/*
* Check if this type/name combination is in the list and update it;
* otherwise add a new node to the list.
*/
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
uu_avl_insert(avl, node, idx);
} else {
nvlist_free(props);
free(node);
node = n;
props = node->usn_nvl;
}
/* Calculate/update width of USED/QUOTA fields */
if (cb->cb_nicenum) {
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTUSED ||
prop == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(space, sizebuf, sizeof (sizebuf));
} else {
zfs_nicenum(space, sizebuf, sizeof (sizebuf));
}
} else {
(void) snprintf(sizebuf, sizeof (sizebuf), "%llu",
(u_longlong_t)space);
}
sizelen = strlen(sizebuf);
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
} else {
return (-1);
}
sizeidx = us_field_index(propname);
if (sizeidx >= 0 && sizelen > cb->cb_width[sizeidx])
cb->cb_width[sizeidx] = sizelen;
if (nvlist_add_uint64(props, propname, space) != 0)
nomem();
return (0);
}
static void
print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, us_node_t *node)
{
nvlist_t *nvl = node->usn_nvl;
char valstr[MAXNAMELEN];
boolean_t first = B_TRUE;
int cfield = 0;
int field;
uint32_t ustype;
/* Check type */
(void) nvlist_lookup_uint32(nvl, "type", &ustype);
if (!(ustype & types))
return;
while ((field = fields[cfield]) != USFIELD_LAST) {
nvpair_t *nvp = NULL;
data_type_t type;
uint32_t val32 = -1;
uint64_t val64 = -1;
const char *strval = "-";
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL)
if (strcmp(nvpair_name(nvp),
us_field_names[field]) == 0)
break;
type = nvp == NULL ? DATA_TYPE_UNKNOWN : nvpair_type(nvp);
switch (type) {
case DATA_TYPE_UINT32:
val32 = fnvpair_value_uint32(nvp);
break;
case DATA_TYPE_UINT64:
val64 = fnvpair_value_uint64(nvp);
break;
case DATA_TYPE_STRING:
strval = fnvpair_value_string(nvp);
break;
case DATA_TYPE_UNKNOWN:
break;
default:
(void) fprintf(stderr, "invalid data type\n");
}
switch (field) {
case USFIELD_TYPE:
if (type == DATA_TYPE_UINT32)
strval = us_type2str(val32);
break;
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
}
break;
case USFIELD_USED:
case USFIELD_QUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_QUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicebytes(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
case USFIELD_OBJUSED:
case USFIELD_OBJQUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_OBJQUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
}
if (!first) {
if (scripted)
(void) putchar('\t');
else
(void) fputs(" ", stdout);
}
if (scripted)
(void) fputs(strval, stdout);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
}
(void) putchar('\n');
}
static void
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, boolean_t rmnode, uu_avl_t *avl)
{
us_node_t *node;
const char *col;
int cfield = 0;
int field;
if (!scripted) {
boolean_t first = B_TRUE;
while ((field = fields[cfield]) != USFIELD_LAST) {
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int)width[field], col);
}
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
print_us_node(scripted, parsable, fields, types, width, node);
if (rmnode)
nvlist_free(node->usn_nvl);
}
}
static int
zfs_do_userspace(int argc, char **argv)
{
zfs_handle_t *zhp;
zfs_userquota_prop_t p;
uu_avl_pool_t *avl_pool;
uu_avl_t *avl_tree;
uu_avl_walk_t *walk;
char *delim;
char deffields[] = "type,name,used,quota,objused,objquota";
char *ofield = NULL;
char *tfield = NULL;
int cfield = 0;
int fields[256];
int i;
boolean_t scripted = B_FALSE;
boolean_t prtnum = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t sid2posix = B_FALSE;
int ret = 0;
int c;
zfs_sort_column_t *sortcol = NULL;
int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
us_cbdata_t cb;
us_node_t *node;
us_node_t *rmnode;
uu_list_pool_t *listpool;
uu_list_t *list;
uu_avl_index_t idx = 0;
uu_list_index_t idx2 = 0;
if (argc < 2)
usage(B_FALSE);
if (strcmp(argv[0], "groupspace") == 0) {
/* Toggle default group types */
types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
} else if (strcmp(argv[0], "projectspace") == 0) {
types = USTYPE_PROJ;
prtnum = B_TRUE;
}
while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
switch (c) {
case 'n':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'n'\n"));
usage(B_FALSE);
}
prtnum = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'o':
ofield = optarg;
break;
case 's':
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
c == 's' ? B_FALSE : B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid field '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 't'\n"));
usage(B_FALSE);
}
tfield = optarg;
break;
case 'i':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'i'\n"));
usage(B_FALSE);
}
sid2posix = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* Use default output fields if not specified using -o */
if (ofield == NULL)
ofield = deffields;
do {
if ((delim = strchr(ofield, ',')) != NULL)
*delim = '\0';
if ((fields[cfield++] = us_field_index(ofield)) == -1) {
(void) fprintf(stderr, gettext("invalid type '%s' "
"for -o option\n"), ofield);
return (-1);
}
if (delim != NULL)
ofield = delim + 1;
} while (delim != NULL);
fields[cfield] = USFIELD_LAST;
/* Override output types (-t option) */
if (tfield != NULL) {
types = 0;
do {
boolean_t found = B_FALSE;
if ((delim = strchr(tfield, ',')) != NULL)
*delim = '\0';
for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
i++) {
if (strcmp(tfield, us_type_names[i]) == 0) {
found = B_TRUE;
types |= us_type_bits[i];
break;
}
}
if (!found) {
(void) fprintf(stderr, gettext("invalid type "
"'%s' for -t option\n"), tfield);
return (-1);
}
if (delim != NULL)
tfield = delim + 1;
} while (delim != NULL);
}
if ((zhp = zfs_path_to_zhandle(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
if (zfs_get_underlying_type(zhp) != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("operation is only applicable "
"to filesystems and their snapshots\n"));
zfs_close(zhp);
return (1);
}
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
nomem();
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
nomem();
/* Always add default sorting columns */
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
cb.cb_sortcol = sortcol;
cb.cb_numname = prtnum;
cb.cb_nicenum = !parsable;
cb.cb_avl_pool = avl_pool;
cb.cb_avl = avl_tree;
cb.cb_sid2posix = sid2posix;
for (i = 0; i < USFIELD_LAST; i++)
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
if ((zfs_prop_is_user(p) &&
!(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
(zfs_prop_is_group(p) &&
!(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))) ||
(zfs_prop_is_project(p) && types != USTYPE_PROJ))
continue;
cb.cb_prop = p;
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0) {
zfs_close(zhp);
return (ret);
}
}
zfs_close(zhp);
/* Sort the list */
if ((node = uu_avl_first(avl_tree)) == NULL)
return (0);
us_populated = B_TRUE;
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
list = uu_list_create(listpool, NULL, UU_DEFAULT);
uu_list_node_init(node, &node->usn_listnode, listpool);
while (node != NULL) {
rmnode = node;
node = uu_avl_next(avl_tree, node);
uu_avl_remove(avl_tree, rmnode);
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
uu_list_insert(list, rmnode, idx2);
}
for (node = uu_list_first(list); node != NULL;
node = uu_list_next(list, node)) {
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
uu_avl_insert(avl_tree, node, idx);
}
uu_list_destroy(list);
uu_list_pool_destroy(listpool);
/* Print and free node nvlist memory */
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
cb.cb_avl);
zfs_free_sort_columns(sortcol);
/* Clean up the AVL tree */
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(cb.cb_avl, node);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(avl_tree);
uu_avl_pool_destroy(avl_pool);
return (ret);
}
/*
* list [-Hp][-r|-d max] [-o property[,...]] [-s property] ... [-S property]
* [-t type[,...]] [filesystem|volume|snapshot] ...
*
* -H Scripted mode; elide headers and separate columns by tabs
* -p Display values in parsable (literal) format.
* -r Recurse over all children
* -d Limit recursion by depth.
* -o Control which fields to display.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* When given no arguments, list all filesystems in the system.
* Otherwise, list the specified datasets, optionally recursing down them if
* '-r' is specified.
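*
* Example (illustrative): 'zfs list -r -t snapshot -o name,used tank'
* lists every snapshot under a hypothetical pool 'tank' along with its
* space usage.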
*/
typedef struct list_cbdata {
boolean_t cb_first;
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZFS_MAXPROPLEN];
const char *header;
int i;
boolean_t first = B_TRUE;
boolean_t right_justify;
color_start(ANSI_BOLD);
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zfs_prop_column_name(pl->pl_prop);
right_justify = zfs_prop_align_right(pl->pl_prop);
} else {
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, header);
else
(void) printf("%-*s", (int)pl->pl_width, header);
}
color_end();
(void) printf("\n");
}
/*
* Decide which color the avail value should be printed in:
* > 80% used = yellow
* > 90% used = red
*/
static const char *
zfs_list_avail_color(zfs_handle_t *zhp)
{
uint64_t used = zfs_prop_get_int(zhp, ZFS_PROP_USED);
uint64_t avail = zfs_prop_get_int(zhp, ZFS_PROP_AVAILABLE);
int percentage = (int)((double)avail / MAX(avail + used, 1) * 100);
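/*
* For example (illustrative): avail = 150G and used = 850G gives
* percentage = 15, i.e. 85% used, which falls in the yellow band.
*/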
if (percentage > 20)
return (NULL);
else if (percentage > 10)
return (ANSI_YELLOW);
else
return (ANSI_RED);
}
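/*
 * Worked example (standalone, illustrative): reproducing the threshold
 * arithmetic above. With used = 90 and avail = 10 only 10% is available,
 * which falls through the "> 10" test and yields red; 15% available
 * yields yellow; 50% available yields no color at all.
 */
#include <stdio.h>
#include <stdint.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

static const char *
avail_color_demo(uint64_t used, uint64_t avail)
{
	int percentage = (int)((double)avail / MAX(avail + used, 1) * 100);

	if (percentage > 20)
		return ("none");
	else if (percentage > 10)
		return ("yellow");
	else
		return ("red");
}

int
main(void)
{
	/* prints "none yellow red" */
	(void) printf("%s %s %s\n", avail_color_demo(50, 50),
	    avail_color_demo(85, 15), avail_color_demo(90, 10));
	return (0);
}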
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZFS_MAXPROPLEN];
nvlist_t *userprops = zfs_get_user_props(zhp);
nvlist_t *propval;
const char *propstr;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (cb->cb_scripted)
(void) putchar('\t');
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
if (pl->pl_prop == ZFS_PROP_NAME) {
(void) strlcpy(property, zfs_get_name(zhp),
sizeof (property));
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_USERPROP) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
propstr = "-";
else
propstr = fnvlist_lookup_string(propval,
ZPROP_VALUE);
right_justify = B_FALSE;
}
/*
* zfs_list_avail_color() needs both ZFS_PROP_AVAILABLE and ZFS_PROP_USED,
* so we scan the property list again for the USED column; when no
* colors are wanted, we can skip the whole thing.
*/
if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
zprop_list_t *pl2 = cb->cb_proplist;
for (; pl2 != NULL; pl2 = pl2->pl_next) {
if (pl2->pl_prop == ZFS_PROP_USED) {
color_start(zfs_list_avail_color(zhp));
/* found it, no need for more loops */
break;
}
}
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
if (pl->pl_prop == ZFS_PROP_AVAILABLE)
color_end();
}
(void) putchar('\n');
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
list_callback(zfs_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
return (0);
}
static int
zfs_do_list(int argc, char **argv)
{
int c;
char default_fields[] =
"name,used,available,referenced,mountpoint";
int types = ZFS_TYPE_DATASET;
boolean_t types_specified = B_FALSE;
char *fields = default_fields;
list_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
switch (c) {
case 'o':
fields = optarg;
break;
case 'p':
cb.cb_literal = B_TRUE;
flags |= ZFS_ITER_LITERAL_PROPS;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 's':
if (zfs_add_sort_column(&sortcol, optarg,
B_FALSE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
types = 0;
types_specified = B_TRUE;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const type_subopts[] = {
- "filesystem", "volume",
- "snapshot", "snap",
+ "filesystem",
+ "fs",
+ "volume",
+ "vol",
+ "snapshot",
+ "snap",
"bookmark",
- "all" };
+ "all"
+ };
static const int type_types[] = {
- ZFS_TYPE_FILESYSTEM, ZFS_TYPE_VOLUME,
- ZFS_TYPE_SNAPSHOT, ZFS_TYPE_SNAPSHOT,
+ ZFS_TYPE_FILESYSTEM,
+ ZFS_TYPE_FILESYSTEM,
+ ZFS_TYPE_VOLUME,
+ ZFS_TYPE_VOLUME,
+ ZFS_TYPE_SNAPSHOT,
+ ZFS_TYPE_SNAPSHOT,
ZFS_TYPE_BOOKMARK,
- ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK };
+ ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK
+ };
for (c = 0; c < ARRAY_SIZE(type_subopts); ++c)
if (strcmp(tok, type_subopts[c]) == 0) {
types |= type_types[c];
goto found3;
}
(void) fprintf(stderr,
gettext("invalid type '%s'\n"), tok);
usage(B_FALSE);
found3:;
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
types &= ~ZFS_TYPE_SNAPSHOT;
/*
* Handle users who want to list all snapshots or bookmarks
* of the current dataset (ex. 'zfs list -t snapshot <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 0 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
/*
* If the user specifies '-o all', zprop_get_list() doesn't
* normally include the name of the dataset. For 'zfs list', we always
* want this property to be first.
*/
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
cb.cb_first = B_TRUE;
/*
* If we are only going to list and sort by properties that are "fast"
* then we can use "simple" mode and avoid populating the properties
* nvlist.
*/
if (zfs_list_only_by_fast(cb.cb_proplist) &&
zfs_sort_only_by_fast(sortcol))
flags |= ZFS_ITER_SIMPLE;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
if (ret == 0 && cb.cb_first && !cb.cb_scripted)
(void) fprintf(stderr, gettext("no datasets available\n"));
return (ret);
}
/*
* zfs rename [-fu] <fs | snap | vol> <fs | snap | vol>
* zfs rename [-f] -p <fs | vol> <fs | vol>
* zfs rename [-u] -r <snap> <snap>
*
* Renames the given dataset to another of the same type.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
* The '-u' flag prevents file systems from being remounted during rename.
*/
static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
renameflags_t flags = { 0 };
int c;
int ret = 0;
int types;
boolean_t parents = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "pruf")) != -1) {
switch (c) {
case 'p':
parents = B_TRUE;
break;
case 'r':
flags.recursive = B_TRUE;
break;
case 'u':
flags.nounmount = B_TRUE;
break;
case 'f':
flags.forceunmount = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (flags.recursive && parents) {
(void) fprintf(stderr, gettext("-p and -r options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.nounmount && parents) {
(void) fprintf(stderr, gettext("-u and -p options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.recursive && strchr(argv[0], '@') == NULL) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if (flags.nounmount)
types = ZFS_TYPE_FILESYSTEM;
else if (parents)
types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
else
types = ZFS_TYPE_DATASET;
if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL)
return (1);
/* If we were asked and the name looks good, try to create ancestors. */
if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
return (1);
}
ret = (zfs_rename(zhp, argv[1], flags) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs promote <fs>
*
* Promotes the given clone fs to be the parent
*/
static int
zfs_do_promote(int argc, char **argv)
{
zfs_handle_t *zhp;
int ret = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing clone filesystem"
" argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
ret = (zfs_promote(zhp) != 0);
zfs_close(zhp);
return (ret);
}
static int
zfs_do_redact(int argc, char **argv)
{
char *snap = NULL;
char *bookname = NULL;
char **rsnaps = NULL;
int numrsnaps = 0;
argv++;
argc--;
if (argc < 3) {
(void) fprintf(stderr, gettext("too few arguments\n"));
usage(B_FALSE);
}
snap = argv[0];
bookname = argv[1];
rsnaps = argv + 2;
numrsnaps = argc - 2;
nvlist_t *rsnapnv = fnvlist_alloc();
for (int i = 0; i < numrsnaps; i++) {
fnvlist_add_boolean(rsnapnv, rsnaps[i]);
}
int err = lzc_redact(snap, bookname, rsnapnv);
fnvlist_free(rsnapnv);
switch (err) {
case 0:
break;
case ENOENT: {
zfs_handle_t *zhp = zfs_open(g_zfs, snap, ZFS_TYPE_SNAPSHOT);
if (zhp == NULL) {
(void) fprintf(stderr, gettext("provided snapshot %s "
"does not exist\n"), snap);
} else {
zfs_close(zhp);
}
for (int i = 0; i < numrsnaps; i++) {
zhp = zfs_open(g_zfs, rsnaps[i], ZFS_TYPE_SNAPSHOT);
if (zhp == NULL) {
(void) fprintf(stderr, gettext("provided "
"snapshot %s does not exist\n"), rsnaps[i]);
} else {
zfs_close(zhp);
}
}
break;
}
case EEXIST:
(void) fprintf(stderr, gettext("specified redaction bookmark "
"(%s) provided already exists\n"), bookname);
break;
case ENAMETOOLONG:
(void) fprintf(stderr, gettext("provided bookmark name cannot "
"be used, final name would be too long\n"));
break;
case E2BIG:
(void) fprintf(stderr, gettext("too many redaction snapshots "
"specified\n"));
break;
case EINVAL:
if (strchr(bookname, '#') != NULL)
(void) fprintf(stderr, gettext(
"redaction bookmark name must not contain '#'\n"));
else
(void) fprintf(stderr, gettext(
"redaction snapshot must be descendent of "
"snapshot being redacted\n"));
break;
case EALREADY:
(void) fprintf(stderr, gettext("attempted to redact redacted "
"dataset or with respect to redacted dataset\n"));
break;
case ENOTSUP:
(void) fprintf(stderr, gettext("redaction bookmarks feature "
"not enabled\n"));
break;
case EXDEV:
(void) fprintf(stderr, gettext("potentially invalid redaction "
"snapshot; full dataset names required\n"));
break;
default:
(void) fprintf(stderr, gettext("internal error: %s\n"),
strerror(errno));
}
return (err);
}
/*
* zfs rollback [-rRf] <snapshot>
*
* -r Delete any intervening snapshots before doing rollback
* -R Delete any snapshots and their clones
* -f Ignored (accepted for backwards compatibility)
*
* Given a filesystem, rollback to a specific snapshot, discarding any changes
* since then and making it the active dataset. If more recent snapshots exist,
* the command will complain unless the '-r' flag is given.
*/
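/*
 * Example invocations (illustrative): "zfs rollback tank/fs@monday"
 * succeeds only if no snapshots or bookmarks newer than @monday exist;
 * "zfs rollback -r tank/fs@monday" also destroys the newer snapshots
 * and bookmarks along the way.
 */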
typedef struct rollback_cbdata {
uint64_t cb_create;
uint8_t cb_younger_ds_printed;
boolean_t cb_first;
int cb_doclones;
char *cb_target;
int cb_error;
boolean_t cb_recurse;
} rollback_cbdata_t;
static int
rollback_check_dependent(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_first && cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot rollback to "
"'%s': clones of previous snapshots exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-R' to "
"force deletion of the following clones and "
"dependents:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
zfs_close(zhp);
return (0);
}
/*
* Report some snapshots/bookmarks more recent than the one specified.
* Used when '-r' is not specified. Dependent clones are reported
* separately via rollback_check_dependent(), without checking the
* transaction group.
*/
static int
rollback_check(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
/*
* Max number of younger snapshots and/or bookmarks to display before
* we stop the iteration.
*/
const uint8_t max_younger = 32;
if (cbp->cb_doclones) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
if (cbp->cb_first && !cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot "
"rollback to '%s': more recent snapshots "
"or bookmarks exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-r' to "
"force deletion of the following "
"snapshots and bookmarks:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
if (cbp->cb_recurse) {
if (zfs_iter_dependents_v2(zhp, 0, B_TRUE,
rollback_check_dependent, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
} else {
(void) fprintf(stderr, "%s\n",
zfs_get_name(zhp));
cbp->cb_younger_ds_printed++;
}
}
zfs_close(zhp);
if (cbp->cb_younger_ds_printed == max_younger) {
/*
* This non-recursive rollback is going to fail due to the
* presence of snapshots and/or bookmarks that are younger than
* the rollback target.
* We printed some of the offending objects; now we stop the
* zfs_iter_snapshot/bookmark iteration so we can fail fast and
* avoid iterating over the rest of the younger objects.
*/
(void) fprintf(stderr, gettext("Output limited to %d "
"snapshots/bookmarks\n"), max_younger);
return (-1);
}
return (0);
}
static int
zfs_do_rollback(int argc, char **argv)
{
int ret = 0;
int c;
boolean_t force = B_FALSE;
rollback_cbdata_t cb = { 0 };
zfs_handle_t *zhp, *snap;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *delim;
uint64_t min_txg = 0;
/* check options */
while ((c = getopt(argc, argv, "rRf")) != -1) {
switch (c) {
case 'r':
cb.cb_recurse = 1;
break;
case 'R':
cb.cb_recurse = 1;
cb.cb_doclones = 1;
break;
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* open the snapshot */
if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
/* open the parent dataset */
(void) strlcpy(parentname, argv[0], sizeof (parentname));
verify((delim = strrchr(parentname, '@')) != NULL);
*delim = '\0';
if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
zfs_close(snap);
return (1);
}
/*
* Check for more recent snapshots and/or clones based on the presence
* of '-r' and '-R'.
*/
cb.cb_target = argv[0];
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
cb.cb_first = B_TRUE;
cb.cb_error = 0;
if (cb.cb_create > 0)
min_txg = cb.cb_create;
if ((ret = zfs_iter_snapshots_v2(zhp, 0, rollback_check, &cb,
min_txg, 0)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks_v2(zhp, 0, rollback_check, &cb)) != 0)
goto out;
if ((ret = cb.cb_error) != 0)
goto out;
/*
* Rollback parent to the given snapshot.
*/
ret = zfs_rollback(zhp, snap, force);
out:
zfs_close(snap);
zfs_close(zhp);
if (ret == 0)
return (0);
else
return (1);
}
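/*
 * Minimal sketch (standalone; uses snprintf(3) rather than strlcpy(3)
 * for portability): deriving the parent dataset name from a snapshot
 * name, as done above with strrchr(3) and truncation at the '@'.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char parent[256];
	char *delim;

	(void) snprintf(parent, sizeof (parent), "%s", "tank/fs@monday");
	if ((delim = strrchr(parent, '@')) != NULL)
		*delim = '\0';
	(void) printf("parent dataset: %s\n", parent);	/* tank/fs */
	return (0);
}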
/*
* zfs set property=value ... { fs | snap | vol } ...
*
* Sets the given properties for all datasets specified on the command line.
*/
static int
set_callback(zfs_handle_t *zhp, void *data)
{
zprop_set_cbdata_t *cb = data;
int ret = zfs_prop_set_list_flags(zhp, cb->cb_proplist, cb->cb_flags);
if (ret != 0 || libzfs_errno(g_zfs) != EZFS_SUCCESS) {
switch (libzfs_errno(g_zfs)) {
case EZFS_MOUNTFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to remount filesystem\n"));
break;
case EZFS_SHARENFSFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to reshare filesystem\n"));
break;
}
}
return (ret);
}
static int
zfs_do_set(int argc, char **argv)
{
zprop_set_cbdata_t cb = { 0 };
int ds_start = -1; /* argv idx of first dataset arg */
int ret = 0;
int i, c;
/* check options */
while ((c = getopt(argc, argv, "u")) != -1) {
switch (c) {
case 'u':
cb.cb_flags |= ZFS_SET_NOMOUNT;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing arguments\n"));
usage(B_FALSE);
}
if (argc < 2) {
if (strchr(argv[0], '=') == NULL) {
(void) fprintf(stderr, gettext("missing property=value "
"argument(s)\n"));
} else {
(void) fprintf(stderr, gettext("missing dataset "
"name(s)\n"));
}
usage(B_FALSE);
}
/* validate argument order: prop=val args followed by dataset args */
for (i = 0; i < argc; i++) {
if (strchr(argv[i], '=') != NULL) {
if (ds_start > 0) {
/* out-of-order prop=val argument */
(void) fprintf(stderr, gettext("invalid "
"argument order\n"));
usage(B_FALSE);
}
} else if (ds_start < 0) {
ds_start = i;
}
}
if (ds_start < 0) {
(void) fprintf(stderr, gettext("missing dataset name(s)\n"));
usage(B_FALSE);
}
/* Populate a list of property settings */
if (nvlist_alloc(&cb.cb_proplist, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 0; i < ds_start; i++) {
if (!parseprop(cb.cb_proplist, argv[i])) {
ret = -1;
goto error;
}
}
ret = zfs_for_each(argc - ds_start, argv + ds_start, 0,
ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, &cb);
error:
nvlist_free(cb.cb_proplist);
return (ret);
}
typedef struct snap_cbdata {
nvlist_t *sd_nvl;
boolean_t sd_recursive;
const char *sd_snapname;
} snap_cbdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snap_cbdata_t *sd = arg;
char *name;
int rv = 0;
int error;
if (sd->sd_recursive &&
zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) != 0) {
zfs_close(zhp);
return (0);
}
error = asprintf(&name, "%s@%s", zfs_get_name(zhp), sd->sd_snapname);
if (error == -1)
nomem();
fnvlist_add_boolean(sd->sd_nvl, name);
free(name);
if (sd->sd_recursive)
rv = zfs_iter_filesystems_v2(zhp, 0, zfs_snapshot_cb, sd);
zfs_close(zhp);
return (rv);
}
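/*
 * Minimal sketch (standalone; assumes a platform providing asprintf(3),
 * hence _GNU_SOURCE): building the "fs@snap" names that the callback
 * above accumulates into its nvlist.
 */
#define	_GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *name;

	if (asprintf(&name, "%s@%s", "tank/fs", "monday") == -1)
		return (1);
	(void) printf("%s\n", name);	/* tank/fs@monday */
	free(name);
	return (0);
}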
/*
* zfs snapshot [-r] [-o prop=value] ... <fs@snap>
*
* Creates a snapshot with the given name. While functionally equivalent to
* 'zfs create', it is a separate command to differentiate intent.
*/
static int
zfs_do_snapshot(int argc, char **argv)
{
int ret = 0;
int c;
nvlist_t *props;
snap_cbdata_t sd = { 0 };
boolean_t multiple_snaps = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(sd.sd_nvl);
nvlist_free(props);
return (1);
}
break;
case 'r':
sd.sd_recursive = B_TRUE;
multiple_snaps = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc > 1)
multiple_snaps = B_TRUE;
for (; argc > 0; argc--, argv++) {
char *atp;
zfs_handle_t *zhp;
atp = strchr(argv[0], '@');
if (atp == NULL)
goto usage;
*atp = '\0';
sd.sd_snapname = atp + 1;
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
goto usage;
if (zfs_snapshot_cb(zhp, &sd) != 0)
goto usage;
}
ret = zfs_snapshot_nvl(g_zfs, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
nvlist_free(props);
if (ret != 0 && multiple_snaps)
(void) fprintf(stderr, gettext("no snapshots were created\n"));
return (ret != 0);
usage:
nvlist_free(sd.sd_nvl);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Array of prefixes to exclude –
* a linear search, even if executed for each dataset,
* is plenty good enough.
*/
typedef struct zfs_send_exclude_arg {
size_t count;
const char **list;
} zfs_send_exclude_arg_t;
static boolean_t
zfs_do_send_exclude(zfs_handle_t *zhp, void *context)
{
zfs_send_exclude_arg_t *excludes = context;
const char *name = zfs_get_name(zhp);
for (size_t i = 0; i < excludes->count; ++i) {
size_t len = strlen(excludes->list[i]);
if (strncmp(name, excludes->list[i], len) == 0 &&
memchr("/@", name[len], sizeof ("/@")))
return (B_FALSE);
}
return (B_TRUE);
}
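/*
 * Illustrative sketch (standalone, not part of this change): the
 * memchr("/@", name[len], sizeof ("/@")) test above relies on
 * sizeof ("/@") being 3, so the bytes searched are '/', '@', and the
 * terminating '\0'. A prefix therefore only matches at a dataset-name
 * boundary, never in the middle of a component.
 */
#include <stdio.h>
#include <string.h>

static int
is_excluded(const char *name, const char *prefix)
{
	size_t len = strlen(prefix);

	return (strncmp(name, prefix, len) == 0 &&
	    memchr("/@", name[len], sizeof ("/@")) != NULL);
}

int
main(void)
{
	/* prints "1 1 1 0" */
	(void) printf("%d %d %d %d\n",
	    is_excluded("pool/a", "pool/a"),	/* '\0' boundary */
	    is_excluded("pool/a/b", "pool/a"),	/* '/' boundary */
	    is_excluded("pool/a@s", "pool/a"),	/* '@' boundary */
	    is_excluded("pool/ab", "pool/a"));	/* mid-component */
	return (0);
}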
/*
* Send a backup stream to stdout.
*/
static int
zfs_do_send(int argc, char **argv)
{
char *fromname = NULL;
char *toname = NULL;
char *resume_token = NULL;
char *cp;
zfs_handle_t *zhp;
sendflags_t flags = { 0 };
int c, err;
nvlist_t *dbgnv = NULL;
char *redactbook = NULL;
zfs_send_exclude_arg_t excludes = { 0 };
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"skip-missing", no_argument, NULL, 's'},
{"redact", required_argument, NULL, 'd'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"proctitle", no_argument, NULL, 'V'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
{"holds", no_argument, NULL, 'h'},
{"saved", no_argument, NULL, 'S'},
{"exclude", required_argument, NULL, 'X'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":i:I:RsDpVvnPLeht:cwbd:SX:",
long_options, NULL)) != -1) {
switch (c) {
case 'X':
for (char *ds; (ds = strsep(&optarg, ",")) != NULL; ) {
if (!zfs_name_valid(ds, ZFS_TYPE_DATASET) ||
strchr(ds, '/') == NULL) {
(void) fprintf(stderr, gettext("-X %s: "
"not a valid non-root dataset name"
".\n"), ds);
usage(B_FALSE);
}
excludes.list = safe_realloc(excludes.list,
sizeof (char *) * (excludes.count + 1));
excludes.list[excludes.count++] = ds;
}
break;
case 'i':
if (fromname)
usage(B_FALSE);
fromname = optarg;
break;
case 'I':
if (fromname)
usage(B_FALSE);
fromname = optarg;
flags.doall = B_TRUE;
break;
case 'R':
flags.replicate = B_TRUE;
break;
case 's':
flags.skipmissing = B_TRUE;
break;
case 'd':
redactbook = optarg;
break;
case 'p':
flags.props = B_TRUE;
break;
case 'b':
flags.backup = B_TRUE;
break;
case 'h':
flags.holds = B_TRUE;
break;
case 'P':
flags.parsable = B_TRUE;
break;
case 'V':
flags.progressastitle = B_TRUE;
break;
case 'v':
flags.verbosity++;
flags.progress = B_TRUE;
break;
case 'D':
(void) fprintf(stderr,
gettext("WARNING: deduplicated send is no "
"longer supported. A regular,\n"
"non-deduplicated stream will be generated.\n\n"));
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'L':
flags.largeblock = B_TRUE;
break;
case 'e':
flags.embed_data = B_TRUE;
break;
case 't':
resume_token = optarg;
break;
case 'c':
flags.compress = B_TRUE;
break;
case 'w':
flags.raw = B_TRUE;
flags.compress = B_TRUE;
flags.embed_data = B_TRUE;
flags.largeblock = B_TRUE;
break;
case 'S':
flags.saved = B_TRUE;
break;
case ':':
/*
* If a parameter was not passed, optopt contains the
* value that would normally lead us into the
* appropriate case statement. If it's greater than
* UINT8_MAX, then this must be a longopt and we should
* look at argv to get the string. Otherwise it's just
* the character, so we should use it directly.
*/
if (optopt <= UINT8_MAX) {
(void) fprintf(stderr,
gettext("missing argument for '%c' "
"option\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("missing argument for '%s' "
"option\n"), argv[optind - 1]);
}
free(excludes.list);
usage(B_FALSE);
break;
case '?':
default:
/*
* If an invalid flag was passed, optopt contains the
* character if it was a short flag, or 0 if it was a
* longopt.
*/
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
free(excludes.list);
usage(B_FALSE);
}
}
if ((flags.parsable || flags.progressastitle) && flags.verbosity == 0)
flags.verbosity = 1;
if (excludes.count > 0 && !flags.replicate) {
free(excludes.list);
(void) fprintf(stderr, gettext("Cannot specify "
"dataset exclusion (-X) on a non-recursive "
"send.\n"));
return (1);
}
argc -= optind;
argv += optind;
if (resume_token != NULL) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.backup || flags.holds ||
flags.saved || redactbook != NULL) {
free(excludes.list);
(void) fprintf(stderr,
gettext("invalid flags combined with -t\n"));
usage(B_FALSE);
}
if (argc > 0) {
free(excludes.list);
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc < 1) {
free(excludes.list);
(void) fprintf(stderr,
gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
free(excludes.list);
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
if (flags.saved) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.doall || flags.backup ||
flags.holds || flags.largeblock || flags.embed_data ||
flags.compress || flags.raw || redactbook != NULL) {
free(excludes.list);
(void) fprintf(stderr, gettext("incompatible flags "
"combined with saved send flag\n"));
usage(B_FALSE);
}
if (strchr(argv[0], '@') != NULL) {
free(excludes.list);
(void) fprintf(stderr, gettext("saved send must "
"specify the dataset with partially-received "
"state\n"));
usage(B_FALSE);
}
}
if (flags.raw && redactbook != NULL) {
free(excludes.list);
(void) fprintf(stderr,
gettext("Error: raw sends may not be redacted.\n"));
return (1);
}
if (!flags.dryrun && isatty(STDOUT_FILENO)) {
free(excludes.list);
(void) fprintf(stderr,
gettext("Error: Stream can not be written to a terminal.\n"
"You must redirect standard output.\n"));
return (1);
}
if (flags.saved) {
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL) {
free(excludes.list);
return (1);
}
err = zfs_send_saved(zhp, &flags, STDOUT_FILENO,
resume_token);
free(excludes.list);
zfs_close(zhp);
return (err != 0);
} else if (resume_token != NULL) {
free(excludes.list);
return (zfs_send_resume(g_zfs, &flags, STDOUT_FILENO,
resume_token));
}
if (flags.skipmissing && !flags.replicate) {
free(excludes.list);
(void) fprintf(stderr,
gettext("skip-missing flag can only be used in "
"conjunction with replicate\n"));
usage(B_FALSE);
}
/*
* For everything except -R and -I, use the new, cleaner code path.
*/
if (!(flags.replicate || flags.doall)) {
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (fromname != NULL && (strchr(fromname, '#') == NULL &&
strchr(fromname, '@') == NULL)) {
/*
* Neither a bookmark nor a snapshot was specified. Print a
* warning, and assume snapshot.
*/
(void) fprintf(stderr, "Warning: incremental source "
"didn't specify type, assuming snapshot. Use '@' "
"or '#' prefix to avoid ambiguity.\n");
(void) snprintf(frombuf, sizeof (frombuf), "@%s",
fromname);
fromname = frombuf;
}
if (fromname != NULL &&
(fromname[0] == '#' || fromname[0] == '@')) {
/*
* Incremental source name begins with # or @.
* Default to same fs as target.
*/
char tmpbuf[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(tmpbuf, fromname, sizeof (tmpbuf));
(void) strlcpy(frombuf, argv[0], sizeof (frombuf));
cp = strchr(frombuf, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(frombuf, tmpbuf, sizeof (frombuf));
fromname = frombuf;
}
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL) {
free(excludes.list);
return (1);
}
err = zfs_send_one(zhp, fromname, STDOUT_FILENO, &flags,
redactbook);
free(excludes.list);
zfs_close(zhp);
return (err != 0);
}
if (fromname != NULL && strchr(fromname, '#')) {
(void) fprintf(stderr,
gettext("Error: multiple snapshots cannot be "
"sent from a bookmark.\n"));
free(excludes.list);
return (1);
}
if (redactbook != NULL) {
(void) fprintf(stderr, gettext("Error: multiple snapshots "
"cannot be sent redacted.\n"));
free(excludes.list);
return (1);
}
if ((cp = strchr(argv[0], '@')) == NULL) {
(void) fprintf(stderr, gettext("Error: "
"Unsupported flag with filesystem or bookmark.\n"));
free(excludes.list);
return (1);
}
*cp = '\0';
toname = cp + 1;
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
free(excludes.list);
return (1);
}
/*
* If they specified the full path to the snapshot, chop off
* everything except the short name of the snapshot, but special
* case if they specify the origin.
*/
if (fromname && (cp = strchr(fromname, '@')) != NULL) {
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
origin, sizeof (origin), &src, NULL, 0, B_FALSE);
if (strcmp(origin, fromname) == 0) {
fromname = NULL;
flags.fromorigin = B_TRUE;
} else {
*cp = '\0';
if (cp != fromname && strcmp(argv[0], fromname)) {
zfs_close(zhp);
free(excludes.list);
(void) fprintf(stderr,
gettext("incremental source must be "
"in same filesystem\n"));
usage(B_FALSE);
}
fromname = cp + 1;
if (strchr(fromname, '@') || strchr(fromname, '/')) {
zfs_close(zhp);
free(excludes.list);
(void) fprintf(stderr,
gettext("invalid incremental source\n"));
usage(B_FALSE);
}
}
}
if (flags.replicate && fromname == NULL)
flags.doall = B_TRUE;
err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO,
excludes.count > 0 ? zfs_do_send_exclude : NULL,
&excludes, flags.verbosity >= 3 ? &dbgnv : NULL);
if (flags.verbosity >= 3 && dbgnv != NULL) {
/*
* dump_nvlist prints to stdout, but that's been
* redirected to a file. Make it print to stderr
* instead.
*/
(void) dup2(STDERR_FILENO, STDOUT_FILENO);
dump_nvlist(dbgnv, 0);
nvlist_free(dbgnv);
}
zfs_close(zhp);
free(excludes.list);
return (err != 0);
}
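/*
 * Minimal sketch (standalone; hypothetical option set) of the
 * getopt_long(3) conventions relied on above: a leading ':' in the
 * option string makes a missing argument return ':' instead of '?',
 * and optopt then identifies the offending option.
 */
#include <stdio.h>
#include <getopt.h>

int
main(int argc, char **argv)
{
	static const struct option longopts[] = {
		{"resume", required_argument, NULL, 't'},
		{0, 0, 0, 0}
	};
	int c;

	while ((c = getopt_long(argc, argv, ":t:v", longopts, NULL)) != -1) {
		switch (c) {
		case 't':
			(void) printf("resume token: %s\n", optarg);
			break;
		case 'v':
			(void) printf("verbose\n");
			break;
		case ':':
			(void) fprintf(stderr,
			    "missing argument for '%c'\n", optopt);
			break;
		case '?':
			(void) fprintf(stderr,
			    "unknown option '%c'\n", optopt);
			break;
		}
	}
	return (0);
}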
/*
* Restore a backup stream from stdin.
*/
static int
zfs_do_receive(int argc, char **argv)
{
int c, err = 0;
recvflags_t flags = { 0 };
boolean_t abort_resumable = B_FALSE;
nvlist_t *props;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":o:x:dehMnuvFsAc")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'x':
if (!parsepropname(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'd':
if (flags.istail) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.isprefix = B_TRUE;
break;
case 'e':
if (flags.isprefix) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.istail = B_TRUE;
break;
case 'h':
flags.skipholds = B_TRUE;
break;
case 'M':
flags.forceunmount = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'u':
flags.nomount = B_TRUE;
break;
case 'v':
flags.verbose = B_TRUE;
break;
case 's':
flags.resumable = B_TRUE;
break;
case 'F':
flags.force = B_TRUE;
break;
case 'A':
abort_resumable = B_TRUE;
break;
case 'c':
flags.heal = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* zfs recv -e (use "tail" name) implies -d (remove dataset "head") */
if (flags.istail)
flags.isprefix = B_TRUE;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (abort_resumable) {
if (flags.isprefix || flags.istail || flags.dryrun ||
flags.resumable || flags.nomount) {
(void) fprintf(stderr, gettext("invalid option\n"));
usage(B_FALSE);
}
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(namebuf, sizeof (namebuf),
"%s/%%recv", argv[0]);
if (zfs_dataset_exists(g_zfs, namebuf,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) {
zfs_handle_t *zhp = zfs_open(g_zfs,
namebuf, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(props);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) ||
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == -1) {
(void) fprintf(stderr,
gettext("'%s' does not have any "
"resumable receive state to abort\n"),
argv[0]);
nvlist_free(props);
zfs_close(zhp);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
}
nvlist_free(props);
return (err != 0);
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
"from a terminal.\n"
"You must redirect standard input.\n"));
nvlist_free(props);
return (1);
}
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
nvlist_free(props);
return (err != 0);
}
/*
* allow/unallow stuff
*/
/* copied from zfs/sys/dsl_deleg.h */
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
#define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
{ ZFS_DELEG_PERM_LOAD_KEY, ZFS_DELEG_NOTE_LOAD_KEY },
{ ZFS_DELEG_PERM_CHANGE_KEY, ZFS_DELEG_NOTE_CHANGE_KEY },
{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
{ ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_NOTE_USEROBJQUOTA },
{ ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_NOTE_USEROBJUSED },
{ ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_NOTE_GROUPOBJQUOTA },
{ ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_NOTE_GROUPOBJUSED },
{ ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_NOTE_PROJECTUSED },
{ ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_NOTE_PROJECTQUOTA },
{ ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_NOTE_PROJECTOBJUSED },
{ ZFS_DELEG_PERM_PROJECTOBJQUOTA, ZFS_DELEG_NOTE_PROJECTOBJQUOTA },
{ NULL, ZFS_DELEG_NOTE_NONE }
};
/* permission structure */
typedef struct deleg_perm {
zfs_deleg_who_type_t dp_who_type;
const char *dp_name;
boolean_t dp_local;
boolean_t dp_descend;
} deleg_perm_t;
/* */
typedef struct deleg_perm_node {
deleg_perm_t dpn_perm;
uu_avl_node_t dpn_avl_node;
} deleg_perm_node_t;
typedef struct fs_perm fs_perm_t;
/* permissions set */
typedef struct who_perm {
zfs_deleg_who_type_t who_type;
const char *who_name; /* id */
char who_ug_name[256]; /* user/group name */
fs_perm_t *who_fsperm; /* uplink */
uu_avl_t *who_deleg_perm_avl; /* permissions */
} who_perm_t;
/* */
typedef struct who_perm_node {
who_perm_t who_perm;
uu_avl_node_t who_avl_node;
} who_perm_node_t;
typedef struct fs_perm_set fs_perm_set_t;
/* fs permissions */
struct fs_perm {
const char *fsp_name;
uu_avl_t *fsp_sc_avl; /* sets,create */
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
fs_perm_set_t *fsp_set; /* uplink */
};
/* */
typedef struct fs_perm_node {
fs_perm_t fspn_fsperm;
uu_avl_t *fspn_avl;
uu_list_node_t fspn_list_node;
} fs_perm_node_t;
/* top level structure */
struct fs_perm_set {
uu_list_pool_t *fsps_list_pool;
uu_list_t *fsps_list; /* list of fs_perms */
uu_avl_pool_t *fsps_named_set_avl_pool;
uu_avl_pool_t *fsps_who_perm_avl_pool;
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
};
static inline const char *
deleg_perm_type(zfs_deleg_note_t note)
{
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
case ZFS_DELEG_NOTE_GROUPUSED:
case ZFS_DELEG_NOTE_USERPROP:
case ZFS_DELEG_NOTE_USERQUOTA:
case ZFS_DELEG_NOTE_USERUSED:
case ZFS_DELEG_NOTE_USEROBJQUOTA:
case ZFS_DELEG_NOTE_USEROBJUSED:
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
case ZFS_DELEG_NOTE_GROUPOBJUSED:
case ZFS_DELEG_NOTE_PROJECTUSED:
case ZFS_DELEG_NOTE_PROJECTQUOTA:
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
/* other */
return (gettext("other"));
default:
return (gettext("subcommand"));
}
}
static int
who_type2weight(zfs_deleg_who_type_t who_type)
{
int res;
switch (who_type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
res = 0;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
res = 1;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
res = 2;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
res = 3;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
res = 4;
break;
default:
res = -1;
}
return (res);
}
static int
who_perm_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const who_perm_node_t *l = larg;
const who_perm_node_t *r = rarg;
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
zfs_deleg_who_type_t rtype = r->who_perm.who_type;
int lweight = who_type2weight(ltype);
int rweight = who_type2weight(rtype);
int res = lweight - rweight;
if (res == 0)
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
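/*
 * Net effect of the weighting above (illustrative summary): permission
 * entries sort as named sets first, then create-time permissions, then
 * user, group, and everyone entries, with ties broken by who name.
 */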
static int
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const deleg_perm_node_t *l = larg;
const deleg_perm_node_t *r = rarg;
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
memset(fspset, 0, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
NULL, UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
== NULL)
nomem();
}
static inline void fs_perm_fini(fs_perm_t *);
static inline void who_perm_fini(who_perm_t *);
static inline void
fs_perm_set_fini(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
while (node != NULL) {
fs_perm_node_t *next_node =
uu_list_next(fspset->fsps_list, node);
fs_perm_t *fsperm = &node->fspn_fsperm;
fs_perm_fini(fsperm);
uu_list_remove(fspset->fsps_list, node);
free(node);
node = next_node;
}
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
}
static inline void
deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
const char *name)
{
deleg_perm->dp_who_type = type;
deleg_perm->dp_name = name;
}
static inline void
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
zfs_deleg_who_type_t type, const char *name)
{
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
memset(who_perm, 0, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
who_perm->who_type = type;
who_perm->who_name = name;
who_perm->who_fsperm = fsperm;
}
static inline void
who_perm_fini(who_perm_t *who_perm)
{
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
while (node != NULL) {
deleg_perm_node_t *next_node =
uu_avl_next(who_perm->who_deleg_perm_avl, node);
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(who_perm->who_deleg_perm_avl);
}
static inline void
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
{
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
memset(fsperm, 0, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
fsperm->fsp_set = fspset;
fsperm->fsp_name = fsname;
}
static inline void
fs_perm_fini(fs_perm_t *fsperm)
{
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_sc_avl, node);
free(node);
node = next_node;
}
node = uu_avl_first(fsperm->fsp_uge_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_uge_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(fsperm->fsp_sc_avl);
uu_avl_destroy(fsperm->fsp_uge_avl);
}
static void
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
zfs_deleg_who_type_t who_type, const char *name, char locality)
{
uu_avl_index_t idx = 0;
deleg_perm_node_t *found_node = NULL;
deleg_perm_t *deleg_perm = &node->dpn_perm;
deleg_perm_init(deleg_perm, who_type, name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL)
uu_avl_insert(avl, node, idx);
else {
node = found_node;
deleg_perm = &node->dpn_perm;
}
switch (locality) {
case ZFS_DELEG_LOCAL:
deleg_perm->dp_local = B_TRUE;
break;
case ZFS_DELEG_DESCENDENT:
deleg_perm->dp_descend = B_TRUE;
break;
case ZFS_DELEG_NA:
break;
default:
assert(B_FALSE); /* invalid locality */
}
}
static inline int
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_perm->who_type;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
deleg_perm_node_t *node =
safe_malloc(sizeof (deleg_perm_node_t));
VERIFY(type == DATA_TYPE_BOOLEAN);
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
set_deleg_perm_node(avl, node, who_type, name, locality);
}
return (0);
}
static inline int
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = fsperm->fsp_set;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *name = nvpair_name(nvp);
uu_avl_t *avl = NULL;
uu_avl_pool_t *avl_pool = NULL;
zfs_deleg_who_type_t perm_type = name[0];
char perm_locality = name[1];
const char *perm_name = name + 3;
who_perm_t *who_perm = NULL;
assert('$' == name[2]);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
switch (perm_type) {
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
avl_pool = fspset->fsps_named_set_avl_pool;
avl = fsperm->fsp_sc_avl;
break;
case ZFS_DELEG_USER:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
avl_pool = fspset->fsps_who_perm_avl_pool;
avl = fsperm->fsp_uge_avl;
break;
default:
assert(!"unhandled zfs_deleg_who_type_t");
}
who_perm_node_t *found_node = NULL;
who_perm_node_t *node = safe_malloc(
sizeof (who_perm_node_t));
who_perm = &node->who_perm;
uu_avl_index_t idx = 0;
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
who_perm_init(who_perm, fsperm, perm_type, perm_name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL) {
if (avl == fsperm->fsp_uge_avl) {
uid_t rid = 0;
struct passwd *p = NULL;
struct group *g = NULL;
const char *nice_name = NULL;
switch (perm_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
rid = atoi(perm_name);
p = getpwuid(rid);
if (p)
nice_name = p->pw_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
rid = atoi(perm_name);
g = getgrgid(rid);
if (g)
nice_name = g->gr_name;
break;
default:
break;
}
if (nice_name != NULL) {
(void) strlcpy(
node->who_perm.who_ug_name,
nice_name, 256);
} else {
/* User or group unknown */
(void) snprintf(
node->who_perm.who_ug_name,
sizeof (node->who_perm.who_ug_name),
"(unknown: %d)", rid);
}
}
uu_avl_insert(avl, node, idx);
} else {
node = found_node;
who_perm = &node->who_perm;
}
assert(who_perm != NULL);
(void) parse_who_perm(who_perm, nvl2, perm_locality);
}
return (0);
}
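/*
 * Illustrative sketch (standalone; the encoded entry is hypothetical):
 * the nvpair names parsed above pack the who type and locality into the
 * first two characters, followed by a literal '$' and an optional who
 * name, e.g. a local user permission for uid 1000.
 */
#include <stdio.h>
#include <assert.h>

int
main(void)
{
	const char *name = "ul$1000";	/* hypothetical encoded entry */
	char perm_type = name[0];	/* who type, e.g. user */
	char perm_locality = name[1];	/* e.g. local */
	const char *perm_name = name + 3;

	assert(name[2] == '$');
	(void) printf("type=%c locality=%c who=%s\n",
	    perm_type, perm_locality, perm_name);
	return (0);
}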
static inline int
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
uu_avl_index_t idx = 0;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *fsname = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
fs_perm_t *fsperm = NULL;
fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
fsperm = &node->fspn_fsperm;
VERIFY(DATA_TYPE_NVLIST == type);
uu_list_node_init(node, &node->fspn_list_node,
fspset->fsps_list_pool);
idx = uu_list_numnodes(fspset->fsps_list);
fs_perm_init(fsperm, fspset, fsname);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
(void) parse_fs_perm(fsperm, nvl2);
uu_list_insert(fspset->fsps_list, node, idx);
}
return (0);
}
static inline const char *
deleg_perm_comment(zfs_deleg_note_t note)
{
const char *str = "";
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
case ZFS_DELEG_NOTE_ALLOW:
str = gettext("Must also have the permission that is being"
"\n\t\t\t\tallowed");
break;
case ZFS_DELEG_NOTE_CLONE:
str = gettext("Must also have the 'create' ability and 'mount'"
"\n\t\t\t\tability in the origin file system");
break;
case ZFS_DELEG_NOTE_CREATE:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DESTROY:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DIFF:
str = gettext("Allows lookup of paths within a dataset;"
"\n\t\t\t\tgiven an object number. Ordinary users need this"
"\n\t\t\t\tin order to use zfs diff");
break;
case ZFS_DELEG_NOTE_HOLD:
str = gettext("Allows adding a user hold to a snapshot");
break;
case ZFS_DELEG_NOTE_MOUNT:
str = gettext("Allows mount/umount of ZFS datasets");
break;
case ZFS_DELEG_NOTE_PROMOTE:
str = gettext("Must also have the 'mount'\n\t\t\t\tand"
" 'promote' ability in the origin file system");
break;
case ZFS_DELEG_NOTE_RECEIVE:
str = gettext("Must also have the 'mount' and 'create'"
" ability");
break;
case ZFS_DELEG_NOTE_RELEASE:
str = gettext("Allows releasing a user hold which\n\t\t\t\t"
"might destroy the snapshot");
break;
case ZFS_DELEG_NOTE_RENAME:
str = gettext("Must also have the 'mount' and 'create'"
"\n\t\t\t\tability in the new parent");
break;
case ZFS_DELEG_NOTE_ROLLBACK:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
break;
case ZFS_DELEG_NOTE_SNAPSHOT:
str = gettext("");
break;
case ZFS_DELEG_NOTE_LOAD_KEY:
str = gettext("Allows loading or unloading an encryption key");
break;
case ZFS_DELEG_NOTE_CHANGE_KEY:
str = gettext("Allows changing or adding an encryption key");
break;
/*
* case ZFS_DELEG_NOTE_VSCAN:
* str = gettext("");
* break;
*/
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
str = gettext("Allows accessing any groupquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPUSED:
str = gettext("Allows reading any groupused@... property");
break;
case ZFS_DELEG_NOTE_USERPROP:
str = gettext("Allows changing any user property");
break;
case ZFS_DELEG_NOTE_USERQUOTA:
str = gettext("Allows accessing any userquota@... property");
break;
case ZFS_DELEG_NOTE_USERUSED:
str = gettext("Allows reading any userused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJQUOTA:
str = gettext("Allows accessing any userobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"groupobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJUSED:
str = gettext("Allows reading any groupobjused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJUSED:
str = gettext("Allows reading any userobjused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTQUOTA:
str = gettext("Allows accessing any projectquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTUSED:
str = gettext("Allows reading any projectused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjused@... property");
break;
/* other */
default:
str = "";
}
return (str);
}
struct allow_opts {
boolean_t local;
boolean_t descend;
boolean_t user;
boolean_t group;
boolean_t everyone;
boolean_t create;
boolean_t set;
boolean_t recursive; /* unallow only */
boolean_t prt_usage;
boolean_t prt_perms;
char *who;
char *perms;
const char *dataset;
};
static inline int
prop_cmp(const void *a, const void *b)
{
const char *str1 = *(const char **)a;
const char *str2 = *(const char **)b;
return (strcmp(str1, str2));
}
static void
allow_usage(boolean_t un, boolean_t requested, const char *msg)
{
const char *opt_desc[] = {
"-h", gettext("show this help message and exit"),
"-l", gettext("set permission locally"),
"-d", gettext("set permission for descents"),
"-u", gettext("set permission for user"),
"-g", gettext("set permission for group"),
"-e", gettext("set permission for everyone"),
"-c", gettext("set create time permission"),
"-s", gettext("define permission set"),
/* unallow only */
"-r", gettext("remove permissions recursively"),
};
size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
size_t allow_size = unallow_size - 2;
const char *props[ZFS_NUM_PROPS];
int i;
size_t count = 0;
FILE *fp = requested ? stdout : stderr;
zprop_desc_t *pdtbl = zfs_prop_get_table();
const char *fmt = gettext("%-16s %-14s\t%s\n");
(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
HELP_ALLOW));
(void) fprintf(fp, gettext("Options:\n"));
for (i = 0; i < (un ? unallow_size : allow_size); i += 2) {
const char *opt = opt_desc[i];
const char *optdsc = opt_desc[i + 1];
(void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc);
}
(void) fprintf(fp, gettext("\nThe following permissions are "
"supported:\n\n"));
(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
gettext("NOTES"));
for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
const char *perm_type = deleg_perm_type(perm_note);
const char *perm_comment = deleg_perm_comment(perm_note);
(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
}
for (i = 0; i < ZFS_NUM_PROPS; i++) {
zprop_desc_t *pd = &pdtbl[i];
if (pd->pd_visible != B_TRUE)
continue;
if (pd->pd_attr == PROP_READONLY)
continue;
props[count++] = pd->pd_name;
}
props[count] = NULL;
qsort(props, count, sizeof (char *), prop_cmp);
for (i = 0; i < count; i++)
(void) fprintf(fp, fmt, props[i], gettext("property"), "");
if (msg != NULL)
(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
exit(requested ? 0 : 2);
}
static inline const char *
munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
char **permsp)
{
if (un && argc == expected_argc - 1)
*permsp = NULL;
else if (argc == expected_argc)
*permsp = argv[argc - 2];
else
allow_usage(un, B_FALSE,
gettext("wrong number of parameters\n"));
return (argv[argc - 1]);
}
static void
parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
{
int uge_sum = opts->user + opts->group + opts->everyone;
int csuge_sum = opts->create + opts->set + uge_sum;
int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
if (uge_sum > 1)
allow_usage(un, B_FALSE,
gettext("-u, -g, and -e are mutually exclusive\n"));
if (opts->prt_usage) {
if (argc == 0 && all_sum == 0)
allow_usage(un, B_TRUE, NULL);
else
usage(B_FALSE);
}
if (opts->set) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -s\n"));
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
if (argv[0][0] != '@')
allow_usage(un, B_FALSE,
gettext("invalid set name: missing '@' prefix\n"));
opts->who = argv[0];
} else if (opts->create) {
if (ldcsuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -c\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (opts->everyone) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -e\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
== 0) {
opts->everyone = B_TRUE;
argc--;
argv++;
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (argc == 1 && !un) {
opts->prt_perms = B_TRUE;
opts->dataset = argv[argc-1];
} else {
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
opts->who = argv[0];
}
if (!opts->local && !opts->descend) {
opts->local = B_TRUE;
opts->descend = B_TRUE;
}
}
static void
store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
const char *who, char *perms, nvlist_t *top_nvl)
{
int i;
char ld[2] = { '\0', '\0' };
char who_buf[MAXNAMELEN + 32];
char base_type = '\0';
char set_type = '\0';
nvlist_t *base_nvl = NULL;
nvlist_t *set_nvl = NULL;
nvlist_t *nvl;
if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
switch (type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
set_type = ZFS_DELEG_NAMED_SET_SETS;
base_type = ZFS_DELEG_NAMED_SET;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
set_type = ZFS_DELEG_CREATE_SETS;
base_type = ZFS_DELEG_CREATE;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
set_type = ZFS_DELEG_USER_SETS;
base_type = ZFS_DELEG_USER;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
set_type = ZFS_DELEG_GROUP_SETS;
base_type = ZFS_DELEG_GROUP;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
set_type = ZFS_DELEG_EVERYONE_SETS;
base_type = ZFS_DELEG_EVERYONE;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
default:
assert(set_type != '\0' && base_type != '\0');
}
if (perms != NULL) {
char *curr = perms;
char *end = curr + strlen(perms);
while (curr < end) {
char *delim = strchr(curr, ',');
if (delim == NULL)
delim = end;
else
*delim = '\0';
if (curr[0] == '@')
nvl = set_nvl;
else
nvl = base_nvl;
(void) nvlist_add_boolean(nvl, curr);
if (delim != end)
*delim = ',';
curr = delim + 1;
}
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (!nvlist_empty(base_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
base_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
base_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
base_nvl);
}
if (!nvlist_empty(set_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
set_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
set_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
set_nvl);
}
}
} else {
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", base_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", base_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", set_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", set_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
}
}
}
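/*
* Build the fsacl nvlist for an allow/unallow request, resolving any
* user or group names in the comma-separated who list to numeric IDs.
*/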
static int
construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
{
if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (opts->set) {
store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
opts->descend, opts->who, opts->perms, *nvlp);
} else if (opts->create) {
store_allow_perm(ZFS_DELEG_CREATE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else if (opts->everyone) {
store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else {
char *curr = opts->who;
char *end = curr + strlen(curr);
while (curr < end) {
const char *who;
zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN;
char *endch;
char *delim = strchr(curr, ',');
char errbuf[256];
char id[64];
struct passwd *p = NULL;
struct group *g = NULL;
uid_t rid;
if (delim == NULL)
delim = end;
else
*delim = '\0';
rid = (uid_t)strtol(curr, &endch, 0);
if (opts->user) {
who_type = ZFS_DELEG_USER;
if (*endch != '\0')
p = getpwnam(curr);
else
p = getpwuid(rid);
if (p != NULL)
rid = p->pw_uid;
else if (*endch != '\0') {
(void) snprintf(errbuf, sizeof (errbuf),
gettext("invalid user %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else if (opts->group) {
who_type = ZFS_DELEG_GROUP;
if (*endch != '\0')
g = getgrnam(curr);
else
g = getgrgid(rid);
if (g != NULL)
rid = g->gr_gid;
else if (*endch != '\0') {
(void) snprintf(errbuf, sizeof (errbuf),
gettext("invalid group %s\n"),
curr);
allow_usage(un, B_TRUE, errbuf);
}
} else {
if (*endch != '\0') {
p = getpwnam(curr);
} else {
p = getpwuid(rid);
}
if (p == NULL) {
if (*endch != '\0') {
g = getgrnam(curr);
} else {
g = getgrgid(rid);
}
}
if (p != NULL) {
who_type = ZFS_DELEG_USER;
rid = p->pw_uid;
} else if (g != NULL) {
who_type = ZFS_DELEG_GROUP;
rid = g->gr_gid;
} else {
(void) snprintf(errbuf, sizeof (errbuf),
gettext("invalid user/group %s\n"),
curr);
allow_usage(un, B_TRUE, errbuf);
}
}
(void) sprintf(id, "%u", rid);
who = id;
store_allow_perm(who_type, opts->local,
opts->descend, who, opts->perms, *nvlp);
curr = delim + 1;
}
}
return (0);
}
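/*
* Print the "Permission sets" and "Create time permissions" sections
* of the "zfs allow" output, one comma-separated line per entry.
*/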
static void
print_set_creat_perms(uu_avl_t *who_avl)
{
const char *sc_title[] = {
gettext("Permission sets:\n"),
gettext("Create time permissions:\n"),
NULL
};
who_perm_node_t *who_node = NULL;
int prev_weight = -1;
for (who_node = uu_avl_first(who_avl); who_node != NULL;
who_node = uu_avl_next(who_avl, who_node)) {
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
const char *who_name = who_node->who_perm.who_name;
int weight = who_type2weight(who_type);
boolean_t first = B_TRUE;
deleg_perm_node_t *deleg_node;
if (prev_weight != weight) {
(void) printf("%s", sc_title[weight]);
prev_weight = weight;
}
if (who_name == NULL || strnlen(who_name, 1) == 0)
(void) printf("\t");
else
(void) printf("\t%s ", who_name);
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (first) {
(void) printf("%s",
deleg_node->dpn_perm.dp_name);
first = B_FALSE;
} else
(void) printf(",%s",
deleg_node->dpn_perm.dp_name);
}
(void) printf("\n");
}
}
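/*
* Print the user/group/everyone delegations that match the requested
* locality (local and/or descendent), preceded by the given title.
*/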
static void
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
const char *title)
{
who_perm_node_t *who_node = NULL;
boolean_t prt_title = B_TRUE;
uu_avl_walk_t *walk;
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
const char *who_name = who_node->who_perm.who_name;
const char *nice_who_name = who_node->who_perm.who_ug_name;
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
char delim = ' ';
deleg_perm_node_t *deleg_node;
boolean_t prt_who = B_TRUE;
for (deleg_node = uu_avl_first(avl);
deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (local != deleg_node->dpn_perm.dp_local ||
descend != deleg_node->dpn_perm.dp_descend)
continue;
if (prt_who) {
const char *who = NULL;
if (prt_title) {
prt_title = B_FALSE;
(void) printf("%s", title);
}
switch (who_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
who = gettext("user");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
who = gettext("group");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
who = gettext("everyone");
who_name = NULL;
break;
default:
assert(who != NULL);
}
prt_who = B_FALSE;
if (who_name == NULL)
(void) printf("\t%s", who);
else
(void) printf("\t%s %s", who, who_name);
}
(void) printf("%c%s", delim,
deleg_node->dpn_perm.dp_name);
delim = ',';
}
if (!prt_who)
(void) printf("\n");
}
uu_avl_walk_end(walk);
}
static void
print_fs_perms(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = NULL;
char buf[MAXNAMELEN + 32];
const char *dsname = buf;
for (node = uu_list_first(fspset->fsps_list); node != NULL;
node = uu_list_next(fspset->fsps_list, node)) {
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
int left = 0;
(void) snprintf(buf, sizeof (buf),
gettext("---- Permissions on %s "),
node->fspn_fsperm.fsp_name);
(void) printf("%s", dsname);
left = 70 - strlen(buf);
while (left-- > 0)
(void) printf("-");
(void) printf("\n");
print_set_creat_perms(sc_avl);
print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
gettext("Local permissions:\n"));
print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
gettext("Descendent permissions:\n"));
print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
gettext("Local+Descendent permissions:\n"));
}
}
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
struct deleg_perms {
boolean_t un;
nvlist_t *nvl;
};
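/*
* zfs_iter_filesystems_v2() callback used by recursive "zfs unallow"
* to apply the same fsacl update to each child filesystem or volume.
*/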
static int
set_deleg_perms(zfs_handle_t *zhp, void *data)
{
struct deleg_perms *perms = (struct deleg_perms *)data;
zfs_type_t zfs_type = zfs_get_type(zhp);
if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
return (0);
return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
}
static int
zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
{
zfs_handle_t *zhp;
nvlist_t *perm_nvl = NULL;
nvlist_t *update_perm_nvl = NULL;
int error = 1;
int c;
struct allow_opts opts = { 0 };
const char *optstr = un ? "ldugecsrh" : "ldugecsh";
/* check opts */
while ((c = getopt(argc, argv, optstr)) != -1) {
switch (c) {
case 'l':
opts.local = B_TRUE;
break;
case 'd':
opts.descend = B_TRUE;
break;
case 'u':
opts.user = B_TRUE;
break;
case 'g':
opts.group = B_TRUE;
break;
case 'e':
opts.everyone = B_TRUE;
break;
case 's':
opts.set = B_TRUE;
break;
case 'c':
opts.create = B_TRUE;
break;
case 'r':
opts.recursive = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'h':
opts.prt_usage = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
parse_allow_args(argc, argv, un, &opts);
/* try to open the dataset */
if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
(void) fprintf(stderr, "Failed to open dataset: %s\n",
opts.dataset);
return (-1);
}
if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
goto cleanup2;
fs_perm_set_init(&fs_perm_set);
if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
goto cleanup1;
}
if (opts.prt_perms)
print_fs_perms(&fs_perm_set);
else {
(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
goto cleanup0;
if (un && opts.recursive) {
struct deleg_perms data = { un, update_perm_nvl };
if (zfs_iter_filesystems_v2(zhp, 0, set_deleg_perms,
&data) != 0)
goto cleanup0;
}
}
error = 0;
cleanup0:
nvlist_free(perm_nvl);
nvlist_free(update_perm_nvl);
cleanup1:
fs_perm_set_fini(&fs_perm_set);
cleanup2:
zfs_close(zhp);
return (error);
}
static int
zfs_do_allow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
}
static int
zfs_do_unallow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
}
static int
zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
{
int errors = 0;
int i;
const char *tag;
boolean_t recursive = B_FALSE;
const char *opts = holding ? "rt" : "r";
int c;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2)
usage(B_FALSE);
tag = argv[0];
--argc;
++argv;
if (holding && tag[0] == '.') {
/* tags starting with '.' are reserved for libzfs */
(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
usage(B_FALSE);
}
for (i = 0; i < argc; ++i) {
zfs_handle_t *zhp;
char parent[ZFS_MAX_DATASET_NAME_LEN];
const char *delim;
char *path = argv[i];
delim = strchr(path, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), path);
++errors;
continue;
}
(void) strlcpy(parent, path, MIN(sizeof (parent),
delim - path + 1));
zhp = zfs_open(g_zfs, parent,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
++errors;
continue;
}
if (holding) {
if (zfs_hold(zhp, delim+1, tag, recursive, -1) != 0)
++errors;
} else {
if (zfs_release(zhp, delim+1, tag, recursive) != 0)
++errors;
}
zfs_close(zhp);
}
return (errors != 0);
}
/*
* zfs hold [-r] [-t] <tag> <snap> ...
*
* -r Recursively hold
*
* Apply a user-hold with the given tag to the list of snapshots.
*/
static int
zfs_do_hold(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
}
/*
* zfs release [-r] <tag> <snap> ...
*
* -r Recursively release
*
* Release a user-hold with the given tag from the list of snapshots.
*/
static int
zfs_do_release(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
}
typedef struct holds_cbdata {
boolean_t cb_recursive;
const char *cb_snapname;
nvlist_t **cb_nvlp;
size_t cb_max_namelen;
size_t cb_max_taglen;
} holds_cbdata_t;
#define STRFTIME_FMT_STR "%a %b %e %H:%M %Y"
#define DATETIME_BUF_LEN (32)
/*
* Print the holds table: one line per (snapshot, tag) pair with the
* timestamp at which the hold was created.
*/
static void
print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl,
boolean_t parsable)
{
int i;
nvpair_t *nvp = NULL;
const char *const hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
const char *col;
if (!scripted) {
for (i = 0; i < 3; i++) {
col = gettext(hdr_cols[i]);
if (i < 2)
(void) printf("%-*s ", i ? tagwidth : nwidth,
col);
else
(void) printf("%s\n", col);
}
}
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *zname = nvpair_name(nvp);
nvlist_t *nvl2;
nvpair_t *nvp2 = NULL;
(void) nvpair_value_nvlist(nvp, &nvl2);
while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
char tsbuf[DATETIME_BUF_LEN];
const char *tagname = nvpair_name(nvp2);
uint64_t val = 0;
time_t time;
struct tm t;
(void) nvpair_value_uint64(nvp2, &val);
time = (time_t)val;
(void) localtime_r(&time, &t);
(void) strftime(tsbuf, DATETIME_BUF_LEN,
gettext(STRFTIME_FMT_STR), &t);
if (scripted) {
if (parsable) {
(void) printf("%s\t%s\t%ld\n", zname,
tagname, (unsigned long)time);
} else {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
}
} else {
if (parsable) {
(void) printf("%-*s %-*s %ld\n",
nwidth, zname, tagwidth,
tagname, (unsigned long)time);
} else {
(void) printf("%-*s %-*s %s\n",
nwidth, zname, tagwidth,
tagname, tsbuf);
}
}
}
}
}
/*
* Callback that collects the holds on one snapshot into the top-level
* nvlist, tracking the name and tag widths needed for column output.
*/
static int
holds_callback(zfs_handle_t *zhp, void *data)
{
holds_cbdata_t *cbp = data;
nvlist_t *top_nvl = *cbp->cb_nvlp;
nvlist_t *nvl = NULL;
nvpair_t *nvp = NULL;
const char *zname = zfs_get_name(zhp);
size_t znamelen = strlen(zname);
if (cbp->cb_recursive) {
const char *snapname;
char *delim = strchr(zname, '@');
if (delim == NULL)
return (0);
snapname = delim + 1;
if (strcmp(cbp->cb_snapname, snapname))
return (0);
}
if (zfs_get_holds(zhp, &nvl) != 0)
return (-1);
if (znamelen > cbp->cb_max_namelen)
cbp->cb_max_namelen = znamelen;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *tag = nvpair_name(nvp);
size_t taglen = strlen(tag);
if (taglen > cbp->cb_max_taglen)
cbp->cb_max_taglen = taglen;
}
return (nvlist_add_nvlist(top_nvl, zname, nvl));
}
/*
* zfs holds [-rHp] <snap> ...
*
* -r Lists holds that are set on the named snapshots recursively.
* -H Scripted mode; elide headers and separate columns by tabs.
* -p Display values in parsable (literal) format.
*/
static int
zfs_do_holds(int argc, char **argv)
{
int c;
boolean_t errors = B_FALSE;
boolean_t scripted = B_FALSE;
boolean_t recursive = B_FALSE;
boolean_t parsable = B_FALSE;
int types = ZFS_TYPE_SNAPSHOT;
holds_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "rHp")) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (recursive) {
types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
flags |= ZFS_ITER_RECURSE;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1)
usage(B_FALSE);
nvlist_t *nvl = fnvlist_alloc();
for (int i = 0; i < argc; ++i) {
char *snapshot = argv[i];
const char *delim;
const char *snapname;
delim = strchr(snapshot, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), snapshot);
errors = B_TRUE;
continue;
}
snapname = delim + 1;
if (recursive)
snapshot[delim - snapshot] = '\0';
cb.cb_recursive = recursive;
cb.cb_snapname = snapname;
cb.cb_nvlp = &nvl;
/*
* 1. collect holds data, set format options
*/
ret = zfs_for_each(1, argv + i, flags, types, NULL, NULL, limit,
holds_callback, &cb);
if (ret != 0)
errors = B_TRUE;
}
/*
* 2. print holds data
*/
print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl,
parsable);
if (nvlist_empty(nvl))
(void) fprintf(stderr, gettext("no datasets available\n"));
nvlist_free(nvl);
return (errors);
}
#define CHECK_SPINNER 30
#define SPINNER_TIME 3 /* seconds */
#define MOUNT_TIME 1 /* seconds */
typedef struct get_all_state {
boolean_t ga_verbose;
get_all_cb_t *ga_cbp;
} get_all_state_t;
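/*
* Collect one filesystem handle for "mount -a"/"share -a", recursing
* into children and updating a progress spinner in verbose mode.
*/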
static int
get_one_dataset(zfs_handle_t *zhp, void *data)
{
static const char *const spin[] = { "-", "\\", "|", "/" };
static int spinval = 0;
static int spincheck = 0;
static time_t last_spin_time = (time_t)0;
get_all_state_t *state = data;
zfs_type_t type = zfs_get_type(zhp);
if (state->ga_verbose) {
if (--spincheck < 0) {
time_t now = time(NULL);
if (last_spin_time + SPINNER_TIME < now) {
update_progress(spin[spinval++ % 4]);
last_spin_time = now;
}
spincheck = CHECK_SPINNER;
}
}
/*
* Iterate over any nested datasets.
*/
if (zfs_iter_filesystems_v2(zhp, 0, get_one_dataset, data) != 0) {
zfs_close(zhp);
return (1);
}
/*
* Skip any datasets whose type does not match.
*/
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(state->ga_cbp, zhp);
assert(state->ga_cbp->cb_used <= state->ga_cbp->cb_alloc);
return (0);
}
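/*
* Gather a handle for every filesystem in the system into *cbp,
* showing progress when verbose.
*/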
static void
get_all_datasets(get_all_cb_t *cbp, boolean_t verbose)
{
get_all_state_t state = {
.ga_verbose = verbose,
.ga_cbp = cbp
};
if (verbose)
set_progress_header(gettext("Reading ZFS config"));
(void) zfs_iter_root(g_zfs, get_one_dataset, &state);
if (verbose)
finish_progress(gettext("done."));
}
/*
* Generic callback for sharing or mounting filesystems. Because the code is so
* similar, we have a common function with an extra parameter to determine which
* mode we are using.
*/
typedef enum { OP_SHARE, OP_MOUNT } share_mount_op_t;
typedef struct share_mount_state {
share_mount_op_t sm_op;
boolean_t sm_verbose;
int sm_flags;
char *sm_options;
enum sa_protocol sm_proto; /* only valid for OP_SHARE */
pthread_mutex_t sm_lock; /* protects the remaining fields */
uint_t sm_total; /* number of filesystems to process */
uint_t sm_done; /* number of filesystems processed */
int sm_status; /* -1 if any of the share/mount operations failed */
} share_mount_state_t;
/*
* Share or mount a dataset.
*/
static int
share_mount_one(zfs_handle_t *zhp, int op, int flags, enum sa_protocol protocol,
boolean_t explicit, const char *options)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char smbshareopts[ZFS_MAXPROPLEN];
const char *cmdname = op == OP_SHARE ? "share" : "mount";
struct mnttab mnt;
uint64_t zoned, canmount;
boolean_t shared_nfs, shared_smb;
assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
/*
* Check to make sure we can mount/share this dataset. If we
* are in the global zone and the filesystem is exported to a
* local zone, or if we are in a local zone and the
* filesystem is not exported, then it is an error.
*/
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned && getzoneid() == GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"dataset is exported to a local zone\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"permission denied\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* Ignore any filesystems which don't apply to us. This
* includes those with a legacy mountpoint, or those with
* legacy share options.
*/
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
strcmp(smbshareopts, "off") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use exports(5) or "
"smb.conf(5) to share this filesystem, or set "
"the sharenfs or sharesmb property\n"));
return (1);
}
/*
* We cannot share or mount legacy filesystems. If the
* shareopts is non-legacy but the mountpoint is legacy, we
* treat it as a legacy share.
*/
if (strcmp(mountpoint, "legacy") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use %s(8) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
if (strcmp(mountpoint, "none") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': no "
"mountpoint set\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* canmount explicit outcome
* on no pass through
* on yes pass through
* off no return 0
* off yes display error, return 1
* noauto no return 0
* noauto yes pass through
*/
canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
if (canmount == ZFS_CANMOUNT_OFF) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"'canmount' property is set to 'off'\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
/*
* When performing a 'zfs mount -a', we skip any mounts for
* datasets that have 'noauto' set. Sharing a dataset with
* 'noauto' set is only allowed if it's mounted.
*/
if (op == OP_MOUNT)
return (0);
if (op == OP_SHARE && !zfs_is_mounted(zhp, NULL)) {
/* also purge it from existing exports */
zfs_unshare(zhp, mountpoint, NULL);
return (0);
}
}
/*
* If this filesystem is encrypted and does not have
* a loaded key, we can not mount it.
*/
if ((flags & MS_CRYPT) == 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF &&
zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"encryption key not loaded\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we can not mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Dataset is not complete, was created by receiving "
"a redacted zfs send stream.\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* At this point, we have verified that the mountpoint and/or
* shareopts are appropriate for auto management. If the
* filesystem is already mounted or shared, return (failing
* for explicit requests); otherwise mount or share the
* filesystem.
*/
switch (op) {
case OP_SHARE: {
enum sa_protocol prot[] = {SA_PROTOCOL_NFS, SA_NO_PROTOCOL};
shared_nfs = zfs_is_shared(zhp, NULL, prot);
*prot = SA_PROTOCOL_SMB;
shared_smb = zfs_is_shared(zhp, NULL, prot);
if ((shared_nfs && shared_smb) ||
(shared_nfs && strcmp(shareopts, "on") == 0 &&
strcmp(smbshareopts, "off") == 0) ||
(shared_smb && strcmp(smbshareopts, "on") == 0 &&
strcmp(shareopts, "off") == 0)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share "
"'%s': filesystem already shared\n"),
zfs_get_name(zhp));
return (1);
}
if (!zfs_is_mounted(zhp, NULL) &&
zfs_mount(zhp, NULL, flags) != 0)
return (1);
*prot = protocol;
if (zfs_share(zhp, protocol == SA_NO_PROTOCOL ? NULL : prot))
return (1);
}
break;
case OP_MOUNT:
mnt.mnt_mntopts = (char *)(options ?: "");
if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
zfs_is_mounted(zhp, NULL)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot mount "
"'%s': filesystem already mounted\n"),
zfs_get_name(zhp));
return (1);
}
if (zfs_mount(zhp, options, flags) != 0)
return (1);
break;
}
return (0);
}
/*
* Reports progress in the form "(current/total)". Not thread-safe.
*/
static void
report_mount_progress(int current, int total)
{
static time_t last_progress_time = 0;
time_t now = time(NULL);
char info[32];
/* display header if we're here for the first time */
if (current == 1) {
set_progress_header(gettext("Mounting ZFS filesystems"));
} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
/* too soon to report again */
return;
}
last_progress_time = now;
(void) sprintf(info, "(%d/%d)", current, total);
if (current == total)
finish_progress(info);
else
update_progress(info);
}
/*
* zfs_foreach_mountpoint() callback that mounts or shares one filesystem and
* updates the progress meter.
*/
static int
share_mount_one_cb(zfs_handle_t *zhp, void *arg)
{
share_mount_state_t *sms = arg;
int ret;
ret = share_mount_one(zhp, sms->sm_op, sms->sm_flags, sms->sm_proto,
B_FALSE, sms->sm_options);
pthread_mutex_lock(&sms->sm_lock);
if (ret != 0)
sms->sm_status = ret;
sms->sm_done++;
if (sms->sm_verbose)
report_mount_progress(sms->sm_done, sms->sm_total);
pthread_mutex_unlock(&sms->sm_lock);
return (ret);
}
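/*
* Append a mount option string to the comma-separated mntopts buffer,
* enforcing the MNT_LINE_MAX limit on the combined length.
*/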
static void
append_options(char *mntopts, char *newopts)
{
int len = strlen(mntopts);
/* original length plus new string to append plus 1 for the comma */
if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
(void) fprintf(stderr, gettext("the opts argument for "
"'%s' option is too long (more than %d chars)\n"),
"-o", MNT_LINE_MAX);
usage(B_FALSE);
}
if (*mntopts)
mntopts[len++] = ',';
(void) strcpy(&mntopts[len], newopts);
}
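/*
* Map a share protocol name to its enum sa_protocol value; an unknown
* name prints the valid choices and exits via usage().
*/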
static enum sa_protocol
sa_protocol_decode(const char *protocol)
{
for (enum sa_protocol i = 0; i < ARRAY_SIZE(sa_protocol_names); ++i)
if (strcmp(protocol, sa_protocol_names[i]) == 0)
return (i);
(void) fputs(gettext("share type must be one of: "), stderr);
for (enum sa_protocol i = 0;
i < ARRAY_SIZE(sa_protocol_names); ++i)
(void) fprintf(stderr, "%s%s",
i != 0 ? ", " : "", sa_protocol_names[i]);
(void) fputc('\n', stderr);
usage(B_FALSE);
}
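/*
* Shared implementation of "zfs mount" and "zfs share": -a walks every
* eligible dataset, "zfs mount" with no arguments lists active ZFS
* mounts, and a single filesystem argument is handled directly.
*/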
static int
share_mount(int op, int argc, char **argv)
{
int do_all = 0;
boolean_t verbose = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":alvo:Of" : "al"))
!= -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'v':
verbose = B_TRUE;
break;
case 'l':
flags |= MS_CRYPT;
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
"options (-o) specified\n"));
usage(B_FALSE);
}
if (options == NULL)
options = safe_malloc(MNT_LINE_MAX + 1);
/* option validation is done later */
append_options(options, optarg);
break;
case 'O':
flags |= MS_OVERLAY;
break;
case 'f':
flags |= MS_FORCE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (do_all) {
enum sa_protocol protocol = SA_NO_PROTOCOL;
if (op == OP_SHARE && argc > 0) {
protocol = sa_protocol_decode(argv[0]);
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
start_progress_timer();
get_all_cb_t cb = { 0 };
get_all_datasets(&cb, verbose);
if (cb.cb_used == 0) {
free(options);
return (0);
}
share_mount_state_t share_mount_state = { 0 };
share_mount_state.sm_op = op;
share_mount_state.sm_verbose = verbose;
share_mount_state.sm_flags = flags;
share_mount_state.sm_options = options;
share_mount_state.sm_proto = protocol;
share_mount_state.sm_total = cb.cb_used;
pthread_mutex_init(&share_mount_state.sm_lock, NULL);
/* For a 'zfs share -a' operation start with a clean slate. */
- zfs_truncate_shares(NULL);
+ if (op == OP_SHARE)
+ zfs_truncate_shares(NULL);
/*
* libshare isn't mt-safe, so only do the operation in parallel
* if we're mounting. Additionally, the key-loading option must
* be serialized so that we can prompt the user for their keys
* in a consistent manner.
*/
zfs_foreach_mountpoint(g_zfs, cb.cb_handles, cb.cb_used,
share_mount_one_cb, &share_mount_state,
op == OP_MOUNT && !(flags & MS_CRYPT));
zfs_commit_shares(NULL);
ret = share_mount_state.sm_status;
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
} else if (argc == 0) {
FILE *mnttab;
struct mnttab entry;
if ((op == OP_SHARE) || (options != NULL)) {
(void) fprintf(stderr, gettext("missing filesystem "
"argument (specify -a for all)\n"));
usage(B_FALSE);
}
/*
* When mount is given no arguments, go through
* /proc/self/mounts and display any active ZFS mounts.
* We hide any snapshots, since they are controlled
* automatically.
*/
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
free(options);
return (ENOENT);
}
while (getmntent(mnttab, &entry) == 0) {
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
(void) fclose(mnttab);
} else {
zfs_handle_t *zhp;
if (argc > 1) {
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
} else {
ret = share_mount_one(zhp, op, flags, SA_NO_PROTOCOL,
B_TRUE, options);
zfs_commit_shares(NULL);
zfs_close(zhp);
}
}
free(options);
return (ret);
}
/*
* zfs mount -a
* zfs mount filesystem
*
* Mount all filesystems, or mount the given filesystem.
*/
static int
zfs_do_mount(int argc, char **argv)
{
return (share_mount(OP_MOUNT, argc, argv));
}
/*
* zfs share -a [nfs | smb]
* zfs share filesystem
*
* Share all filesystems, or share the given filesystem.
*/
static int
zfs_do_share(int argc, char **argv)
{
return (share_mount(OP_SHARE, argc, argv));
}
typedef struct unshare_unmount_node {
zfs_handle_t *un_zhp;
char *un_mountp;
uu_avl_node_t un_avlnode;
} unshare_unmount_node_t;
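/*
* AVL comparator ordering unshare/unmount nodes by mountpoint path.
*/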
static int
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
const unshare_unmount_node_t *l = larg;
const unshare_unmount_node_t *r = rarg;
return (strcmp(l->un_mountp, r->un_mountp));
}
/*
* Convenience routine used by zfs_do_unmount() and zfs_do_unshare(). Given an
* absolute path, find the entry in /proc/self/mounts, verify that it's a
* ZFS filesystem, and unmount or unshare it appropriately.
*/
static int
unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
{
zfs_handle_t *zhp;
int ret = 0;
struct stat64 statbuf;
struct extmnttab entry;
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
ino_t path_inode;
/*
* Search for the given (major,minor) pair in the mount table.
*/
if (getextmntent(path, &entry, &statbuf) != 0) {
if (op == OP_SHARE) {
(void) fprintf(stderr, gettext("cannot %s '%s': not "
"currently mounted\n"), cmdname, path);
return (1);
}
(void) fprintf(stderr, gettext("warning: %s not in"
"/proc/self/mounts\n"), path);
if ((ret = umount2(path, flags)) != 0)
(void) fprintf(stderr, gettext("%s: %s\n"), path,
strerror(errno));
return (ret != 0);
}
path_inode = statbuf.st_ino;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
"filesystem\n"), cmdname, path);
return (1);
}
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
ret = 1;
if (stat64(entry.mnt_mountp, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
goto out;
} else if (statbuf.st_ino != path_inode) {
(void) fprintf(stderr, gettext("cannot "
"%s '%s': not a mountpoint\n"), cmdname, path);
goto out;
}
if (op == OP_SHARE) {
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char smbshare_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(smbshare_prop, "off") == 0) {
(void) fprintf(stderr, gettext("cannot unshare "
"'%s': legacy share\n"), path);
(void) fprintf(stderr, gettext("use exportfs(8) "
"or smbcontrol(1) to unshare this filesystem\n"));
} else if (!zfs_is_shared(zhp, NULL, NULL)) {
(void) fprintf(stderr, gettext("cannot unshare '%s': "
"not currently shared\n"), path);
} else {
ret = zfs_unshare(zhp, path, NULL);
zfs_commit_shares(NULL);
}
} else {
char mtpt_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
if (is_manual) {
ret = zfs_unmount(zhp, NULL, flags);
} else if (strcmp(mtpt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot unmount "
"'%s': legacy mountpoint\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use umount(8) "
"to unmount this filesystem\n"));
} else {
ret = zfs_unmountall(zhp, flags);
}
}
out:
zfs_close(zhp);
return (ret != 0);
}
/*
* Shared implementation for unsharing or unmounting filesystems.
*/
static int
unshare_unmount(int op, int argc, char **argv)
{
int do_all = 0;
int flags = 0;
int ret = 0;
int c;
zfs_handle_t *zhp;
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char sharesmb[ZFS_MAXPROPLEN];
/* check options */
while ((c = getopt(argc, argv, op == OP_SHARE ? ":a" : "afu")) != -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'f':
flags |= MS_FORCE;
break;
case 'u':
flags |= MS_CRYPT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (do_all) {
/*
* We could make use of zfs_for_each() to walk all datasets in
* the system, but this would be very inefficient, especially
* since we would have to linearly search /proc/self/mounts for
* each one. Instead, do one pass through /proc/self/mounts
* looking for zfs entries and call zfs_unmount() for each one.
*
* Things get a little tricky if the administrator has created
* mountpoints beneath other ZFS filesystems. In this case, we
* have to unmount the deepest filesystems first. To accomplish
* this, we place all the mountpoints in an AVL tree sorted by
* mountpoint, and walk the result in reverse so that the deepest
* mountpoints are unmounted first.
*/
FILE *mnttab;
struct mnttab entry;
uu_avl_pool_t *pool;
uu_avl_t *tree = NULL;
unshare_unmount_node_t *node;
uu_avl_index_t idx;
uu_avl_walk_t *walk;
enum sa_protocol *protocol = NULL,
single_protocol[] = {SA_NO_PROTOCOL, SA_NO_PROTOCOL};
if (op == OP_SHARE && argc > 0) {
*single_protocol = sa_protocol_decode(argv[0]);
protocol = single_protocol;
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (((pool = uu_avl_pool_create("unmount_pool",
sizeof (unshare_unmount_node_t),
offsetof(unshare_unmount_node_t, un_avlnode),
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
nomem();
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
return (ENOENT);
}
while (getmntent(mnttab, &entry) == 0) {
/* ignore non-ZFS entries */
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/* ignore snapshots */
if (strchr(entry.mnt_special, '@') != NULL)
continue;
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
continue;
}
/*
* Ignore datasets that are excluded/restricted by
* parent pool name.
*/
if (zpool_skip_pool(zfs_get_pool_name(zhp))) {
zfs_close(zhp);
continue;
}
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") != 0)
break;
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0)
continue;
break;
case OP_MOUNT:
/* Ignore legacy mounts */
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "legacy") == 0)
continue;
/* Ignore canmount=noauto mounts */
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
ZFS_CANMOUNT_NOAUTO)
continue;
break;
default:
break;
}
node = safe_malloc(sizeof (unshare_unmount_node_t));
node->un_zhp = zhp;
node->un_mountp = safe_strdup(entry.mnt_mountp);
uu_avl_node_init(node, &node->un_avlnode, pool);
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
uu_avl_insert(tree, node, idx);
} else {
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
}
(void) fclose(mnttab);
/*
* Walk the AVL tree in reverse, unmounting each filesystem and
* removing it from the AVL tree in the process.
*/
if ((walk = uu_avl_walk_start(tree,
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
const char *mntarg = NULL;
uu_avl_remove(tree, node);
switch (op) {
case OP_SHARE:
if (zfs_unshare(node->un_zhp,
node->un_mountp, protocol) != 0)
ret = 1;
break;
case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
mntarg, flags) != 0)
ret = 1;
break;
}
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
if (op == OP_SHARE)
zfs_commit_shares(protocol);
uu_avl_walk_end(walk);
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
} else {
if (argc != 1) {
if (argc == 0)
(void) fprintf(stderr,
gettext("missing filesystem argument\n"));
else
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* We have an argument, but it may be a full path or a ZFS
* filesystem. Pass full paths off to unshare_unmount_path(), otherwise
* open the filesystem and unshare or unmount it directly.
*/
if (argv[0][0] == '/')
return (unshare_unmount_path(op, argv[0],
flags, B_FALSE));
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
verify(zfs_prop_get(zhp, op == OP_SHARE ?
ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
NULL, 0, B_FALSE) == 0);
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
sharesmb, sizeof (sharesmb), NULL, NULL,
0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(sharesmb, "off") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"exports(5) or smb.conf(5) to unshare "
"this filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp, NULL, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': not currently "
"shared\n"), zfs_get_name(zhp));
ret = 1;
} else if (zfs_unshareall(zhp, NULL) != 0) {
ret = 1;
}
break;
case OP_MOUNT:
if (strcmp(nfs_mnt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"umount(8) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': not currently "
"mounted\n"),
zfs_get_name(zhp));
ret = 1;
} else if (zfs_unmountall(zhp, flags) != 0) {
ret = 1;
}
break;
}
zfs_close(zhp);
}
return (ret);
}
/*
* zfs unmount [-fu] -a
* zfs unmount [-fu] filesystem
*
* Unmount all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unmount(int argc, char **argv)
{
return (unshare_unmount(OP_MOUNT, argc, argv));
}
/*
* zfs unshare -a
* zfs unshare filesystem
*
* Unshare all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unshare(int argc, char **argv)
{
return (unshare_unmount(OP_SHARE, argc, argv));
}
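/*
* Look up a command name in command_table, storing its index in *idx;
* returns non-zero if the command is unknown.
*/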
static int
find_command_idx(const char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
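/*
* zfs diff [-FHth] <snapshot> [snapshot|filesystem]
*
* Display the differences between the given snapshot and a later
* snapshot, or the current state of the filesystem.
*/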
static int
zfs_do_diff(int argc, char **argv)
{
zfs_handle_t *zhp;
int flags = 0;
char *tosnap = NULL;
char *fromsnap = NULL;
char *atp, *copy;
int err = 0;
int c;
struct sigaction sa;
while ((c = getopt(argc, argv, "FHth")) != -1) {
switch (c) {
case 'F':
flags |= ZFS_DIFF_CLASSIFY;
break;
case 'H':
flags |= ZFS_DIFF_PARSEABLE;
break;
case 't':
flags |= ZFS_DIFF_TIMESTAMP;
break;
case 'h':
flags |= ZFS_DIFF_NO_MANGLE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr,
gettext("must provide at least one snapshot name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
fromsnap = argv[0];
tosnap = (argc == 2) ? argv[1] : NULL;
copy = NULL;
if (*fromsnap != '@')
copy = strdup(fromsnap);
else if (tosnap)
copy = strdup(tosnap);
if (copy == NULL)
usage(B_FALSE);
if ((atp = strchr(copy, '@')) != NULL)
*atp = '\0';
if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
free(copy);
return (1);
}
free(copy);
/*
* Ignore SIGPIPE so that the library can give us
* information on any failure
*/
if (sigemptyset(&sa.sa_mask) == -1) {
err = errno;
goto out;
}
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) == -1) {
err = errno;
goto out;
}
err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
out:
zfs_close(zhp);
return (err != 0);
}
/*
* zfs bookmark <fs@source>|<fs#source> <fs#bookmark>
*
* Creates a bookmark with the given name from the source snapshot
* or creates a copy of an existing source bookmark.
*/
static int
zfs_do_bookmark(int argc, char **argv)
{
char *source, *bookname;
char expbuf[ZFS_MAX_DATASET_NAME_LEN];
int source_type;
nvlist_t *nvl;
int ret = 0;
int c;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing bookmark argument\n"));
goto usage;
}
source = argv[0];
bookname = argv[1];
if (strchr(source, '@') == NULL && strchr(source, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid source name '%s': "
"must contain a '@' or '#'\n"), source);
goto usage;
}
if (strchr(bookname, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid bookmark name '%s': "
"must contain a '#'\n"), bookname);
goto usage;
}
/*
* expand source or bookname to full path:
* one of them may be specified as short name
*/
{
char **expand;
char *source_short, *bookname_short;
source_short = strpbrk(source, "@#");
bookname_short = strpbrk(bookname, "#");
if (source_short == source &&
bookname_short == bookname) {
(void) fprintf(stderr, gettext(
"either source or bookmark must be specified as "
"full dataset paths\n"));
goto usage;
} else if (source_short != source &&
bookname_short != bookname) {
expand = NULL;
} else if (source_short != source) {
strlcpy(expbuf, source, sizeof (expbuf));
expand = &bookname;
} else if (bookname_short != bookname) {
strlcpy(expbuf, bookname, sizeof (expbuf));
expand = &source;
} else {
abort();
}
if (expand != NULL) {
*strpbrk(expbuf, "@#") = '\0'; /* dataset name in buf */
(void) strlcat(expbuf, *expand, sizeof (expbuf));
*expand = expbuf;
}
}
/* determine source type */
switch (*strpbrk(source, "@#")) {
case '@': source_type = ZFS_TYPE_SNAPSHOT; break;
case '#': source_type = ZFS_TYPE_BOOKMARK; break;
default: abort();
}
/* test the source exists */
zfs_handle_t *zhp;
zhp = zfs_open(g_zfs, source, source_type);
if (zhp == NULL)
goto usage;
zfs_close(zhp);
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, bookname, source);
ret = lzc_bookmark(nvl, NULL);
fnvlist_free(nvl);
if (ret != 0) {
const char *err_msg = NULL;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create bookmark '%s'"), bookname);
switch (ret) {
case EXDEV:
err_msg = "bookmark is in a different pool";
break;
case ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR:
err_msg = "source is not an ancestor of the "
"new bookmark's dataset";
break;
case EEXIST:
err_msg = "bookmark exists";
break;
case EINVAL:
err_msg = "invalid argument";
break;
case ENOTSUP:
err_msg = "bookmark feature not enabled";
break;
case ENOSPC:
err_msg = "out of space";
break;
case ENOENT:
err_msg = "dataset does not exist";
break;
default:
(void) zfs_standard_error(g_zfs, ret, errbuf);
break;
}
if (err_msg != NULL) {
(void) fprintf(stderr, "%s: %s\n", errbuf,
dgettext(TEXT_DOMAIN, err_msg));
}
}
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
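/*
* zfs program [-jn] [-t <instruction limit>] [-m <memory limit>]
* <pool> <program file> [lua args...]
*
* Run a Lua channel program on the given pool, reading the program
* from the file ("-" means standard input).
*/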
static int
zfs_do_channel_program(int argc, char **argv)
{
int ret, fd, c;
size_t progsize, progread;
nvlist_t *outnvl = NULL;
uint64_t instrlimit = ZCP_DEFAULT_INSTRLIMIT;
uint64_t memlimit = ZCP_DEFAULT_MEMLIMIT;
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;
/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
switch (c) {
case 't':
case 'm': {
uint64_t arg;
char *endp;
errno = 0;
arg = strtoull(optarg, &endp, 0);
if (errno != 0 || *endp != '\0') {
(void) fprintf(stderr, gettext(
"invalid argument "
"'%s': expected integer\n"), optarg);
goto usage;
}
if (c == 't') {
instrlimit = arg;
} else {
ASSERT3U(c, ==, 'm');
memlimit = arg;
}
break;
}
case 'n': {
sync_flag = B_FALSE;
break;
}
case 'j': {
json_output = B_TRUE;
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr,
gettext("invalid number of arguments\n"));
goto usage;
}
const char *poolname = argv[0];
const char *filename = argv[1];
if (strcmp(filename, "-") == 0) {
fd = 0;
filename = "standard input";
} else if ((fd = open(filename, O_RDONLY)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
filename, strerror(errno));
return (1);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("cannot open pool '%s'\n"),
poolname);
if (fd != 0)
(void) close(fd);
return (1);
}
zpool_close(zhp);
/*
* Read in the channel program, expanding the program buffer as
* necessary.
*/
progread = 0;
progsize = 1024;
char *progbuf = safe_malloc(progsize);
do {
ret = read(fd, progbuf + progread, progsize - progread);
progread += ret;
if (progread == progsize && ret > 0) {
progsize *= 2;
progbuf = safe_realloc(progbuf, progsize);
}
} while (ret > 0);
if (fd != 0)
(void) close(fd);
if (ret < 0) {
free(progbuf);
(void) fprintf(stderr,
gettext("cannot read '%s': %s\n"),
filename, strerror(errno));
return (1);
}
progbuf[progread] = '\0';
/*
* Any remaining arguments are passed as arguments to the lua script as
* a string array:
* {
* "argv" -> [ "arg 1", ... "arg n" ],
* }
*/
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string_array(argnvl, ZCP_ARG_CLIARGV,
(const char **)argv + 2, argc - 2);
if (sync_flag) {
ret = lzc_channel_program(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
} else {
ret = lzc_channel_program_nosync(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
}
if (ret != 0) {
/*
* On error, report the error message handed back by lua if one
* exists. Otherwise, generate an appropriate error message,
* falling back on strerror() for an unexpected return code.
*/
const char *errstring = NULL;
const char *msg = gettext("Channel program execution failed");
uint64_t instructions = 0;
if (outnvl != NULL && nvlist_exists(outnvl, ZCP_RET_ERROR)) {
const char *es = NULL;
(void) nvlist_lookup_string(outnvl,
ZCP_RET_ERROR, &es);
if (es == NULL)
errstring = strerror(ret);
else
errstring = es;
if (ret == ETIME) {
(void) nvlist_lookup_uint64(outnvl,
ZCP_ARG_INSTRLIMIT, &instructions);
}
} else {
switch (ret) {
case EINVAL:
errstring =
"Invalid instruction or memory limit.";
break;
case ENOMEM:
errstring = "Return value too large.";
break;
case ENOSPC:
errstring = "Memory limit exhausted.";
break;
case ETIME:
errstring = "Timed out.";
break;
case EPERM:
errstring = "Permission denied. Channel "
"programs must be run as root.";
break;
default:
(void) zfs_standard_error(g_zfs, ret, msg);
}
}
if (errstring != NULL)
(void) fprintf(stderr, "%s:\n%s\n", msg, errstring);
if (ret == ETIME && instructions != 0)
(void) fprintf(stderr,
gettext("%llu Lua instructions\n"),
(u_longlong_t)instructions);
} else {
if (json_output) {
(void) nvlist_print_json(stdout, outnvl);
} else if (nvlist_empty(outnvl)) {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and did not produce output.\n"));
} else {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and produced output:\n"));
dump_nvlist(outnvl, 4);
}
}
free(progbuf);
fnvlist_free(outnvl);
fnvlist_free(argnvl);
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
typedef struct loadkey_cbdata {
boolean_t cb_loadkey;
boolean_t cb_recursive;
boolean_t cb_noop;
char *cb_keylocation;
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
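/*
* zfs_for_each() callback that loads or unloads the encryption key for
* one dataset, tallying attempts and failures in the cbdata.
*/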
static int
load_key_callback(zfs_handle_t *zhp, void *data)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = data;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If we are working recursively, we want to skip loading / unloading
* keys for non-encryption roots and datasets whose keys are already
* in the desired end-state.
*/
if (cb->cb_recursive) {
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0)
return (ret);
if (!is_encroot)
return (0);
if ((cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_AVAILABLE) ||
(!cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_UNAVAILABLE))
return (0);
}
cb->cb_numattempted++;
if (cb->cb_loadkey)
ret = zfs_crypto_load_key(zhp, cb->cb_noop, cb->cb_keylocation);
else
ret = zfs_crypto_unload_key(zhp);
if (ret != 0) {
cb->cb_numfailed++;
return (ret);
}
return (0);
}
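/*
* Shared implementation of "zfs load-key" and "zfs unload-key"; -r and
* -a recurse over encryption roots, while -n and -L apply to load-key
* only.
*/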
static int
load_unload_keys(int argc, char **argv, boolean_t loadkey)
{
int c, ret = 0, flags = 0;
boolean_t do_all = B_FALSE;
loadkey_cbdata_t cb = { 0 };
cb.cb_loadkey = loadkey;
while ((c = getopt(argc, argv, "anrL:")) != -1) {
/* noop and alternate keylocations only apply to zfs load-key */
if (loadkey) {
switch (c) {
case 'n':
cb.cb_noop = B_TRUE;
continue;
case 'L':
cb.cb_keylocation = optarg;
continue;
default:
break;
}
}
switch (c) {
case 'a':
do_all = B_TRUE;
cb.cb_recursive = B_TRUE;
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
cb.cb_recursive = B_TRUE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!do_all && argc == 0) {
(void) fprintf(stderr,
gettext("Missing dataset argument or -a option\n"));
usage(B_FALSE);
}
if (do_all && argc != 0) {
(void) fprintf(stderr,
gettext("Cannot specify dataset with -a option\n"));
usage(B_FALSE);
}
if (cb.cb_recursive && cb.cb_keylocation != NULL &&
strcmp(cb.cb_keylocation, "prompt") != 0) {
(void) fprintf(stderr, gettext("alternate keylocation may only "
"be 'prompt' with -r or -a\n"));
usage(B_FALSE);
}
ret = zfs_for_each(argc, argv, flags,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
load_key_callback, &cb);
if (cb.cb_noop || (cb.cb_recursive && cb.cb_numattempted != 0)) {
(void) printf(gettext("%llu / %llu key(s) successfully %s\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted,
loadkey ? (cb.cb_noop ? "verified" : "loaded") :
"unloaded");
}
if (cb.cb_numfailed != 0)
ret = -1;
return (ret);
}
static int
zfs_do_load_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_TRUE));
}
static int
zfs_do_unload_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_FALSE));
}
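/*
* zfs change-key [-l] [-o prop=value]... <dataset>
* zfs change-key -i [-l] <dataset>
*
* Rewrap the encryption key for the given dataset, optionally loading
* the current key first (-l) or inheriting the parent's key (-i).
*/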
static int
zfs_do_change_key(int argc, char **argv)
{
int c, ret;
uint64_t keystatus;
boolean_t loadkey = B_FALSE, inheritkey = B_FALSE;
zfs_handle_t *zhp = NULL;
nvlist_t *props = fnvlist_alloc();
while ((c = getopt(argc, argv, "lio:")) != -1) {
switch (c) {
case 'l':
loadkey = B_TRUE;
break;
case 'i':
inheritkey = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
if (inheritkey && !nvlist_empty(props)) {
(void) fprintf(stderr,
gettext("Properties not allowed for inheriting\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("Too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[argc - 1],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (loadkey) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus != ZFS_KEYSTATUS_AVAILABLE) {
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
}
/* refresh the properties so the new keystatus is visible */
zfs_refresh_properties(zhp);
}
ret = zfs_crypto_rewrap(zhp, props, inheritkey);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
nvlist_free(props);
zfs_close(zhp);
return (0);
}
/*
* 1) zfs project [-d|-r] <file|directory ...>
* List project ID and inherit flag of file(s) or directories.
* -d: List the directory itself, not its children.
* -r: List subdirectories recursively.
*
* 2) zfs project -C [-k] [-r] <file|directory ...>
* Clear project inherit flag and/or ID on the file(s) or directories.
* -k: Keep the project ID unchanged. If not specified, the project ID
* will be reset to zero.
* -r: Clear on subdirectories recursively.
*
* 3) zfs project -c [-0] [-d|-r] [-p id] <file|directory ...>
* Check project ID and inherit flag on the file(s) or directories,
* report the outliers.
* -0: Print file name followed by a NUL instead of newline.
* -d: Check the directory itself, not its children.
* -p: Specify the referenced ID for comparing with the target file(s)
* or directories' project IDs. If not specified, the target (top)
* directory's project ID will be used as the referenced one.
* -r: Check subdirectories recursively.
*
* 4) zfs project [-p id] [-r] [-s] <file|directory ...>
* Set project ID and/or inherit flag on the file(s) or directories.
* -p: Set the project ID as the given id.
* -r: Set on subdirectories recursively. If the "-p" option is not
* specified, the top-level directory's project ID is used as the
* given id, and both the project ID and the inherit flag are set
* on all descendants of the top-level directory.
* -s: Set project inherit flag.
*/
static int
zfs_do_project(int argc, char **argv)
{
zfs_project_control_t zpc = {
.zpc_expected_projid = ZFS_INVALID_PROJID,
.zpc_op = ZFS_PROJECT_OP_DEFAULT,
.zpc_dironly = B_FALSE,
.zpc_keep_projid = B_FALSE,
.zpc_newline = B_TRUE,
.zpc_recursive = B_FALSE,
.zpc_set_flag = B_FALSE,
};
int ret = 0, c;
if (argc < 2)
usage(B_FALSE);
while ((c = getopt(argc, argv, "0Ccdkp:rs")) != -1) {
switch (c) {
case '0':
zpc.zpc_newline = B_FALSE;
break;
case 'C':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CLEAR;
break;
case 'c':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CHECK;
break;
case 'd':
zpc.zpc_dironly = B_TRUE;
/* overwrite "-r" option */
zpc.zpc_recursive = B_FALSE;
break;
case 'k':
zpc.zpc_keep_projid = B_TRUE;
break;
case 'p': {
char *endptr;
errno = 0;
zpc.zpc_expected_projid = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid project ID\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid >= UINT32_MAX) {
(void) fprintf(stderr,
gettext("project ID must be less than "
"%u\n"), UINT32_MAX);
usage(B_FALSE);
}
break;
}
case 'r':
zpc.zpc_recursive = B_TRUE;
/* overwrite "-d" option */
zpc.zpc_dironly = B_FALSE;
break;
case 's':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_set_flag = B_TRUE;
zpc.zpc_op = ZFS_PROJECT_OP_SET;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (zpc.zpc_op == ZFS_PROJECT_OP_DEFAULT) {
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID)
zpc.zpc_op = ZFS_PROJECT_OP_SET;
else
zpc.zpc_op = ZFS_PROJECT_OP_LIST;
}
switch (zpc.zpc_op) {
case ZFS_PROJECT_OP_LIST:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CHECK:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CLEAR:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID) {
(void) fprintf(stderr,
gettext("'-p' is useless together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_SET:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless for set project ID and/or "
"inherit flag\n"));
usage(B_FALSE);
}
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
default:
ASSERT(0);
break;
}
argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}
for (int i = 0; i < argc; i++) {
int err;
err = zfs_project_handle(argv[i], &zpc);
if (err && !ret)
ret = err;
}
return (ret);
}
static int
zfs_do_wait(int argc, char **argv)
{
boolean_t enabled[ZFS_WAIT_NUM_ACTIVITIES];
int error = 0, i;
int c;
/* By default, wait for all types of activity. */
for (i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++)
enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "t:")) != -1) {
switch (c) {
case 't':
/* Reset activities array */
memset(&enabled, 0, sizeof (enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_subopts[
ZFS_WAIT_NUM_ACTIVITIES] = { "deleteq" };
for (i = 0; i < ARRAY_SIZE(col_subopts); ++i)
if (strcmp(tok, col_subopts[i]) == 0) {
enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argv += optind;
argc -= optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'filesystem' "
"argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zfs_handle_t *zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
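/*
 * Poll until no enabled activity reports anything left to wait for,
 * or until the filesystem goes away.
 */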
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (int i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!enabled[i])
continue;
error = zfs_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zfs_close(zhp);
return (error);
}
/*
* Display version message
*/
static int
zfs_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
}
/* Display documentation */
static int
zfs_do_help(int argc, char **argv)
{
char page[MAXNAMELEN];
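/*
 * Map the help topic to a man page name: plain "zfs" for the overview,
 * "zfsconcepts"/"zfsprops" for those topics, and "zfs-<topic>" for
 * subcommands (e.g. "zfs help snapshot" opens zfs-snapshot(8)).
 */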
if (argc < 3 || strcmp(argv[2], "zfs") == 0)
strcpy(page, "zfs");
else if (strcmp(argv[2], "concepts") == 0 ||
strcmp(argv[2], "props") == 0)
snprintf(page, sizeof (page), "zfs%s", argv[2]);
else
snprintf(page, sizeof (page), "zfs-%s", argv[2]);
execlp("man", "man", page, (char *)NULL);
fprintf(stderr, "couldn't run man program: %s\n", strerror(errno));
return (-1);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
const char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* The 'umount' command is an alias for 'unmount'
*/
if (strcmp(cmdname, "umount") == 0)
cmdname = "unmount";
/*
* The 'recv' command is an alias for 'receive'
*/
if (strcmp(cmdname, "recv") == 0)
cmdname = "receive";
/*
* The 'snap' command is an alias for 'snapshot'
*/
if (strcmp(cmdname, "snap") == 0)
cmdname = "snapshot";
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) ||
(strcmp(cmdname, "--help") == 0))
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zfs_do_version(argc, argv));
/*
* Special case 'help'
*/
if (strcmp(cmdname, "help") == 0)
return (zfs_do_help(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_setproctitle_init(argc, argv, environ);
/*
* Many commands modify their input strings while parsing them.
* We create a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
/*
* zfs zone nsfile filesystem
*
* Add the given dataset to, or remove it from, the given namespace.
*/
#ifdef __linux__
static int
zfs_do_zone_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int ret;
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_userns(zhp, argv[1], attach) != 0);
zfs_close(zhp);
return (ret);
}
static int
zfs_do_zone(int argc, char **argv)
{
return (zfs_do_zone_impl(argc, argv, B_TRUE));
}
static int
zfs_do_unzone(int argc, char **argv)
{
return (zfs_do_zone_impl(argc, argv, B_FALSE));
}
#endif
#ifdef __FreeBSD__
#include <sys/jail.h>
#include <jail.h>
/*
* Attach/detach the given dataset to/from the given jail
*/
static int
zfs_do_jail_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int jailid, ret;
/* check number of arguments */
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
jailid = jail_getid(argv[1]);
if (jailid < 0) {
(void) fprintf(stderr, gettext("invalid jail id or name\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_jail(zhp, jailid, attach) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs jail jailid filesystem
*
* Attach the given dataset to the given jail
*/
static int
zfs_do_jail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_TRUE));
}
/*
* zfs unjail jailid filesystem
*
* Detach the given dataset from the given jail
*/
static int
zfs_do_unjail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_FALSE));
}
#endif
diff --git a/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c b/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
index 231ca97f1f6f..c57c689afa9c 100644
--- a/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
+++ b/sys/contrib/openzfs/cmd/zpool/os/freebsd/zpool_vdev_os.c
@@ -1,126 +1,147 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Using libdiskmgt, make sure that no
* devices are already in use. Some can be overridden using the 'force'
* flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <paths.h>
#include <sys/stat.h>
#include <sys/disk.h>
#include <sys/mntent.h>
#include <libgeom.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
int
check_device(const char *name, boolean_t force, boolean_t isspare,
boolean_t iswholedisk)
{
(void) iswholedisk;
char path[MAXPATHLEN];
if (strncmp(name, _PATH_DEV, sizeof (_PATH_DEV) - 1) != 0)
snprintf(path, sizeof (path), "%s%s", _PATH_DEV, name);
else
strlcpy(path, name, sizeof (path));
return (check_file(path, force, isspare));
}
boolean_t
check_sector_size_database(char *path, int *sector_size)
{
(void) path, (void) sector_size;
return (0);
}
void
after_zpool_upgrade(zpool_handle_t *zhp)
{
char bootfs[ZPOOL_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
sizeof (bootfs), NULL, B_FALSE) == 0 &&
strcmp(bootfs, "-") != 0) {
(void) printf(gettext("Pool '%s' has the bootfs "
"property set, you might need to update\nthe boot "
"code. See gptzfsboot(8) and loader.efi(8) for "
"details.\n"), zpool_get_name(zhp));
}
}
int
check_file(const char *file, boolean_t force, boolean_t isspare)
{
return (check_file_generic(file, force, isspare));
}
+
+int
+zpool_power_current_state(zpool_handle_t *zhp, char *vdev)
+{
+
+ (void) zhp;
+ (void) vdev;
+ /* Enclosure slot power not supported on FreeBSD yet */
+ return (-1);
+}
+
+int
+zpool_power(zpool_handle_t *zhp, char *vdev, boolean_t turn_on)
+{
+
+ (void) zhp;
+ (void) vdev;
+ (void) turn_on;
+ /* Enclosure slot power not supported on FreeBSD yet */
+ return (ENOTSUP);
+}
diff --git a/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c b/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
index 7f4486e062fe..006a3a7d8e01 100644
--- a/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
+++ b/sys/contrib/openzfs/cmd/zpool/os/linux/zpool_vdev_os.c
@@ -1,418 +1,673 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Using libblkid, make sure that no
* devices are already in use. Some can be overridden using the 'force'
* flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <sys/efi_partition.h>
#include <sys/stat.h>
#include <sys/mntent.h>
#include <uuid/uuid.h>
#include <blkid/blkid.h>
typedef struct vdev_disk_db_entry
{
char id[24];
int sector_size;
} vdev_disk_db_entry_t;
/*
* Database of block devices that lie about physical sector sizes. The
* identification string must be precisely 24 characters to avoid false
* negatives.
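* (The string is compared against bytes 8-31 of the INQUIRY response,
* i.e. the 8-byte vendor and 16-byte product identification fields.)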
*/
static vdev_disk_db_entry_t vdev_disk_database[] = {
{"ATA ADATA SSD S396 3", 8192},
{"ATA APPLE SSD SM128E", 8192},
{"ATA APPLE SSD SM256E", 8192},
{"ATA APPLE SSD SM512E", 8192},
{"ATA APPLE SSD SM768E", 8192},
{"ATA C400-MTFDDAC064M", 8192},
{"ATA C400-MTFDDAC128M", 8192},
{"ATA C400-MTFDDAC256M", 8192},
{"ATA C400-MTFDDAC512M", 8192},
{"ATA Corsair Force 3 ", 8192},
{"ATA Corsair Force GS", 8192},
{"ATA INTEL SSDSA2CT04", 8192},
{"ATA INTEL SSDSA2BZ10", 8192},
{"ATA INTEL SSDSA2BZ20", 8192},
{"ATA INTEL SSDSA2BZ30", 8192},
{"ATA INTEL SSDSA2CW04", 8192},
{"ATA INTEL SSDSA2CW08", 8192},
{"ATA INTEL SSDSA2CW12", 8192},
{"ATA INTEL SSDSA2CW16", 8192},
{"ATA INTEL SSDSA2CW30", 8192},
{"ATA INTEL SSDSA2CW60", 8192},
{"ATA INTEL SSDSC2CT06", 8192},
{"ATA INTEL SSDSC2CT12", 8192},
{"ATA INTEL SSDSC2CT18", 8192},
{"ATA INTEL SSDSC2CT24", 8192},
{"ATA INTEL SSDSC2CW06", 8192},
{"ATA INTEL SSDSC2CW12", 8192},
{"ATA INTEL SSDSC2CW18", 8192},
{"ATA INTEL SSDSC2CW24", 8192},
{"ATA INTEL SSDSC2CW48", 8192},
{"ATA KINGSTON SH100S3", 8192},
{"ATA KINGSTON SH103S3", 8192},
{"ATA M4-CT064M4SSD2 ", 8192},
{"ATA M4-CT128M4SSD2 ", 8192},
{"ATA M4-CT256M4SSD2 ", 8192},
{"ATA M4-CT512M4SSD2 ", 8192},
{"ATA OCZ-AGILITY2 ", 8192},
{"ATA OCZ-AGILITY3 ", 8192},
{"ATA OCZ-VERTEX2 3.5 ", 8192},
{"ATA OCZ-VERTEX3 ", 8192},
{"ATA OCZ-VERTEX3 LT ", 8192},
{"ATA OCZ-VERTEX3 MI ", 8192},
{"ATA OCZ-VERTEX4 ", 8192},
{"ATA SAMSUNG MZ7WD120", 8192},
{"ATA SAMSUNG MZ7WD240", 8192},
{"ATA SAMSUNG MZ7WD480", 8192},
{"ATA SAMSUNG MZ7WD960", 8192},
{"ATA SAMSUNG SSD 830 ", 8192},
{"ATA Samsung SSD 840 ", 8192},
{"ATA SanDisk SSD U100", 8192},
{"ATA TOSHIBA THNSNH06", 8192},
{"ATA TOSHIBA THNSNH12", 8192},
{"ATA TOSHIBA THNSNH25", 8192},
{"ATA TOSHIBA THNSNH51", 8192},
{"ATA APPLE SSD TS064C", 4096},
{"ATA APPLE SSD TS128C", 4096},
{"ATA APPLE SSD TS256C", 4096},
{"ATA APPLE SSD TS512C", 4096},
{"ATA INTEL SSDSA2M040", 4096},
{"ATA INTEL SSDSA2M080", 4096},
{"ATA INTEL SSDSA2M160", 4096},
{"ATA INTEL SSDSC2MH12", 4096},
{"ATA INTEL SSDSC2MH25", 4096},
{"ATA OCZ CORE_SSD ", 4096},
{"ATA OCZ-VERTEX ", 4096},
{"ATA SAMSUNG MCCOE32G", 4096},
{"ATA SAMSUNG MCCOE64G", 4096},
{"ATA SAMSUNG SSD PM80", 4096},
/* Flash drives optimized for 4KB IOs on larger pages */
{"ATA INTEL SSDSC2BA10", 4096},
{"ATA INTEL SSDSC2BA20", 4096},
{"ATA INTEL SSDSC2BA40", 4096},
{"ATA INTEL SSDSC2BA80", 4096},
{"ATA INTEL SSDSC2BB08", 4096},
{"ATA INTEL SSDSC2BB12", 4096},
{"ATA INTEL SSDSC2BB16", 4096},
{"ATA INTEL SSDSC2BB24", 4096},
{"ATA INTEL SSDSC2BB30", 4096},
{"ATA INTEL SSDSC2BB40", 4096},
{"ATA INTEL SSDSC2BB48", 4096},
{"ATA INTEL SSDSC2BB60", 4096},
{"ATA INTEL SSDSC2BB80", 4096},
{"ATA INTEL SSDSC2BW24", 4096},
{"ATA INTEL SSDSC2BW48", 4096},
{"ATA INTEL SSDSC2BP24", 4096},
{"ATA INTEL SSDSC2BP48", 4096},
{"NA SmrtStorSDLKAE9W", 4096},
{"NVMe Amazon EC2 NVMe ", 4096},
/* Imported from Open Solaris */
{"ATA MARVELL SD88SA02", 4096},
/* Advanced format Hard drives */
{"ATA Hitachi HDS5C303", 4096},
{"ATA SAMSUNG HD204UI ", 4096},
{"ATA ST2000DL004 HD20", 4096},
{"ATA WDC WD10EARS-00M", 4096},
{"ATA WDC WD10EARS-00S", 4096},
{"ATA WDC WD10EARS-00Z", 4096},
{"ATA WDC WD15EARS-00M", 4096},
{"ATA WDC WD15EARS-00S", 4096},
{"ATA WDC WD15EARS-00Z", 4096},
{"ATA WDC WD20EARS-00M", 4096},
{"ATA WDC WD20EARS-00S", 4096},
{"ATA WDC WD20EARS-00Z", 4096},
{"ATA WDC WD1600BEVT-0", 4096},
{"ATA WDC WD2500BEVT-0", 4096},
{"ATA WDC WD3200BEVT-0", 4096},
{"ATA WDC WD5000BEVT-0", 4096},
};
#define INQ_REPLY_LEN 96
#define INQ_CMD_LEN 6
static const int vdev_disk_database_size =
sizeof (vdev_disk_database) / sizeof (vdev_disk_database[0]);
boolean_t
check_sector_size_database(char *path, int *sector_size)
{
unsigned char inq_buff[INQ_REPLY_LEN];
unsigned char sense_buffer[32];
unsigned char inq_cmd_blk[INQ_CMD_LEN] =
{INQUIRY, 0, 0, 0, INQ_REPLY_LEN, 0};
sg_io_hdr_t io_hdr;
int error;
int fd;
int i;
/* Prepare INQUIRY command */
memset(&io_hdr, 0, sizeof (sg_io_hdr_t));
io_hdr.interface_id = 'S';
io_hdr.cmd_len = sizeof (inq_cmd_blk);
io_hdr.mx_sb_len = sizeof (sense_buffer);
io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
io_hdr.dxfer_len = INQ_REPLY_LEN;
io_hdr.dxferp = inq_buff;
io_hdr.cmdp = inq_cmd_blk;
io_hdr.sbp = sense_buffer;
io_hdr.timeout = 10; /* 10 milliseconds is ample time */
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
error = ioctl(fd, SG_IO, (unsigned long) &io_hdr);
(void) close(fd);
if (error < 0)
return (B_FALSE);
if ((io_hdr.info & SG_INFO_OK_MASK) != SG_INFO_OK)
return (B_FALSE);
for (i = 0; i < vdev_disk_database_size; i++) {
if (memcmp(inq_buff + 8, vdev_disk_database[i].id, 24))
continue;
*sector_size = vdev_disk_database[i].sector_size;
return (B_TRUE);
}
return (B_FALSE);
}
static int
check_slice(const char *path, blkid_cache cache, int force, boolean_t isspare)
{
int err;
char *value;
/* No valid type detected; the device is safe to use */
value = blkid_get_tag_value(cache, "TYPE", path);
if (value == NULL)
return (0);
/*
* If libblkid detects a ZFS device, we check the device
* using check_file() to see if it's safe. The one safe
* case is a spare device shared between multiple pools.
*/
if (strcmp(value, "zfs_member") == 0) {
err = check_file(path, force, isspare);
} else {
if (force) {
err = 0;
} else {
err = -1;
vdev_error(gettext("%s contains a filesystem of "
"type '%s'\n"), path, value);
}
}
free(value);
return (err);
}
/*
* Validate that a disk, including all of its partitions, is safe to use.
*
* For EFI labeled disks this can be done relatively easily with the libefi
* library. The partition numbers are extracted from the label and used
* to generate the expected /dev/ paths. Each partition can then be
* checked for conflicts.
*
* For non-EFI labeled disks (MBR/EBR/etc) the same process is possible,
* but due to the lack of readily available libraries this scanning is
* not implemented. Instead only the device path as given is checked.
*/
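/*
 * For example (illustrative paths): a whole disk given as
 * /dev/disk/by-id/DISK is checked as /dev/disk/by-id/DISK-part1, ...,
 * while /dev/sda is checked as /dev/sda1 and /dev/nvme0n1 as
 * /dev/nvme0n1p1.
 */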
static int
check_disk(const char *path, blkid_cache cache, int force,
boolean_t isspare, boolean_t iswholedisk)
{
struct dk_gpt *vtoc;
char slice_path[MAXPATHLEN];
int err = 0;
int fd, i;
int flags = O_RDONLY|O_DIRECT;
if (!iswholedisk)
return (check_slice(path, cache, force, isspare));
/* only spares can be shared, other devices require exclusive access */
if (!isspare)
flags |= O_EXCL;
if ((fd = open(path, flags)) < 0) {
char *value = blkid_get_tag_value(cache, "TYPE", path);
(void) fprintf(stderr, gettext("%s is in use and contains "
"a %s filesystem.\n"), path, value ? value : "unknown");
free(value);
return (-1);
}
/*
* Expected to fail for non-EFI labeled disks. Just check the device
* as given and do not attempt to detect and scan partitions.
*/
err = efi_alloc_and_read(fd, &vtoc);
if (err) {
(void) close(fd);
return (check_slice(path, cache, force, isspare));
}
/*
* The primary EFI partition label is damaged, but the secondary
* label at the end of the device is intact. Rather than use the
* intact backup label, play it safe and treat this as a non-EFI device.
*/
if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
efi_free(vtoc);
(void) close(fd);
if (force) {
/* Partitions will now be created using the backup */
return (0);
} else {
vdev_error(gettext("%s contains a corrupt primary "
"EFI label.\n"), path);
return (-1);
}
}
for (i = 0; i < vtoc->efi_nparts; i++) {
if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED ||
uuid_is_null((uchar_t *)&vtoc->efi_parts[i].p_guid))
continue;
if (strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0)
(void) snprintf(slice_path, sizeof (slice_path),
"%s%s%d", path, "-part", i+1);
else
(void) snprintf(slice_path, sizeof (slice_path),
"%s%s%d", path, isdigit(path[strlen(path)-1]) ?
"p" : "", i+1);
err = check_slice(slice_path, cache, force, isspare);
if (err)
break;
}
efi_free(vtoc);
(void) close(fd);
return (err);
}
int
check_device(const char *path, boolean_t force,
boolean_t isspare, boolean_t iswholedisk)
{
blkid_cache cache;
int error;
error = blkid_get_cache(&cache, NULL);
if (error != 0) {
(void) fprintf(stderr, gettext("unable to access the blkid "
"cache.\n"));
return (-1);
}
error = check_disk(path, cache, force, isspare, iswholedisk);
blkid_put_cache(cache);
return (error);
}
void
after_zpool_upgrade(zpool_handle_t *zhp)
{
(void) zhp;
}
int
check_file(const char *file, boolean_t force, boolean_t isspare)
{
return (check_file_generic(file, force, isspare));
}
+
+/*
+ * Read from a sysfs file and return an allocated string. Removes
+ * the newline from the end of the string if there is one.
+ *
+ * Returns a string on success (which must be freed), or NULL on error.
+ */
+static char *zpool_sysfs_gets(char *path)
+{
+ int fd;
+ struct stat statbuf;
+ char *buf = NULL;
+ ssize_t count = 0;
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return (NULL);
+
+ if (fstat(fd, &statbuf) != 0) {
+ close(fd);
+ return (NULL);
+ }
+
+ buf = calloc(sizeof (*buf), statbuf.st_size + 1);
+ if (buf == NULL) {
+ close(fd);
+ return (NULL);
+ }
+
+ /*
+ * Note that we can read fewer bytes than st_size, and that's OK. Sysfs
+ * files report a size of 4k even if they only return a short
+ * string.
+ */
+ count = read(fd, buf, statbuf.st_size);
+ if (count < 0) {
+ /* Error doing read() or we overran the buffer */
+ close(fd);
+ free(buf);
+ return (NULL);
+ }
+
+ /* Remove trailing newline, guarding against an empty read */
+ if (count > 0 && buf[count - 1] == '\n')
+ buf[count - 1] = 0;
+
+ close(fd);
+
+ return (buf);
+}
+
+/*
+ * Write a string to a sysfs file.
+ *
+ * Returns 0 on success, non-zero otherwise.
+ */
+static int zpool_sysfs_puts(char *path, char *str)
+{
+ FILE *file;
+
+ file = fopen(path, "w");
+ if (!file) {
+ return (-1);
+ }
+
+ if (fputs(str, file) < 0) {
+ fclose(file);
+ return (-2);
+ }
+ fclose(file);
+ return (0);
+}
+
+/* Given a vdev nvlist_t, rescan its enclosure sysfs path */
+static void
+rescan_vdev_config_dev_sysfs_path(nvlist_t *vdev_nv)
+{
+ update_vdev_config_dev_sysfs_path(vdev_nv,
+ fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH),
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
+}
+
+/*
+ * Given a power string: "on", "off", "1", or "0", return 0 if it's an
+ * off value, 1 if it's an on value, and -1 if the value is unrecognized.
+ */
+static int zpool_power_parse_value(char *str)
+{
+ if ((strcmp(str, "off") == 0) || (strcmp(str, "0") == 0))
+ return (0);
+
+ if ((strcmp(str, "on") == 0) || (strcmp(str, "1") == 0))
+ return (1);
+
+ return (-1);
+}
+
+/*
+ * Given a vdev string return an allocated string containing the sysfs path to
+ * its power control file. Also check that the power control file really
+ * exists and has the correct permissions.
+ *
+ * Example returned strings:
+ *
+ * /sys/class/enclosure/0:0:122:0/10/power_status
+ * /sys/bus/pci/slots/10/power
+ *
+ * Returns allocated string on success (which must be freed), NULL on failure.
+ */
+static char *
+zpool_power_sysfs_path(zpool_handle_t *zhp, char *vdev)
+{
+ const char *enc_sysfs_dir = NULL;
+ char *path = NULL;
+ nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
+
+ if (vdev_nv == NULL) {
+ return (NULL);
+ }
+
+ /* Make sure we're getting the updated enclosure sysfs path */
+ rescan_vdev_config_dev_sysfs_path(vdev_nv);
+
+ if (nvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
+ &enc_sysfs_dir) != 0) {
+ return (NULL);
+ }
+
+ if (asprintf(&path, "%s/power_status", enc_sysfs_dir) == -1)
+ return (NULL);
+
+ if (access(path, W_OK) != 0) {
+ free(path);
+ path = NULL;
+ /* No HDD 'power_status' file, maybe it's NVMe? */
+ if (asprintf(&path, "%s/power", enc_sysfs_dir) == -1) {
+ return (NULL);
+ }
+
+ if (access(path, R_OK | W_OK) != 0) {
+ /* Not NVMe either */
+ free(path);
+ return (NULL);
+ }
+ }
+
+ return (path);
+}
+
+/*
+ * Given a path to a sysfs power control file, return B_TRUE if the words
+ * "on"/"off" should be written to it, or B_FALSE if "0"/"1" should be used.
+ */
+static boolean_t
+zpool_power_use_word(char *sysfs_path)
+{
+ if (strcmp(&sysfs_path[strlen(sysfs_path) - strlen("power_status")],
+ "power_status") == 0) {
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+/*
+ * Check the sysfs power control value for a vdev.
+ *
+ * Returns:
+ * 0 - Power is off
+ * 1 - Power is on
+ * -1 - Error or unsupported
+ */
+int
+zpool_power_current_state(zpool_handle_t *zhp, char *vdev)
+{
+ char *val;
+ int rc;
+
+ char *path = zpool_power_sysfs_path(zhp, vdev);
+ if (path == NULL)
+ return (-1);
+
+ val = zpool_sysfs_gets(path);
+ if (val == NULL) {
+ free(path);
+ return (-1);
+ }
+
+ rc = zpool_power_parse_value(val);
+ free(val);
+ free(path);
+ return (rc);
+}
+
+/*
+ * Turn the enclosure slot for a device on or off
+ *
+ * Device path is the full path to the device (like /dev/sda or /dev/sda1).
+ *
+ * Return code:
+ * 0: Success
+ * ENOTSUP: Power control not supported for OS
+ * EBADSLT: Couldn't read current power state
+ * ENOENT: No sysfs path to power control
+ * EIO: Couldn't write sysfs power value
+ * EBADE: Sysfs power value didn't change
+ */
+int
+zpool_power(zpool_handle_t *zhp, char *vdev, boolean_t turn_on)
+{
+ char *sysfs_path;
+ const char *val;
+ int rc;
+ int timeout_ms;
+
+ rc = zpool_power_current_state(zhp, vdev);
+ if (rc == -1) {
+ return (EBADSLT);
+ }
+
+ /* Already correct value? */
+ if (rc == (int)turn_on)
+ return (0);
+
+ sysfs_path = zpool_power_sysfs_path(zhp, vdev);
+ if (sysfs_path == NULL)
+ return (ENOENT);
+
+ if (zpool_power_use_word(sysfs_path)) {
+ val = turn_on ? "on" : "off";
+ } else {
+ val = turn_on ? "1" : "0";
+ }
+
+ rc = zpool_sysfs_puts(sysfs_path, (char *)val);
+
+ free(sysfs_path);
+ if (rc != 0) {
+ return (EIO);
+ }
+
+ /*
+ * Wait up to 30 seconds for sysfs power value to change after
+ * writing it.
+ */
+ timeout_ms = zpool_getenv_int("ZPOOL_POWER_ON_SLOT_TIMEOUT_MS", 30000);
+ for (int i = 0; i < MAX(1, timeout_ms / 200); i++) {
+ rc = zpool_power_current_state(zhp, vdev);
+ if (rc == (int)turn_on)
+ return (0); /* success */
+
+ fsleep(0.200); /* 200ms */
+ }
+
+ /* sysfs value never changed */
+ return (EBADE);
+}
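+
+/*
+ * Illustrative (hypothetical) caller sketch, using only the functions
+ * defined above:
+ *
+ *	if (zpool_power(zhp, vdev, B_TRUE) == 0)
+ *		assert(zpool_power_current_state(zhp, vdev) == 1);
+ */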
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/ses b/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
index 638145c95d47..19ef92ad67b2 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
@@ -1,61 +1,69 @@
#!/bin/sh
#
# Print SCSI Enclosure Services (SES) info. The output is dependent on the name
# of the script/symlink used to call it.
#
helpstr="
enc: Show disk enclosure w:x:y:z value.
slot: Show disk slot number as reported by the enclosure.
encdev: Show /dev/sg* device associated with the enclosure disk slot.
fault_led: Show value of the disk enclosure slot fault LED.
locate_led: Show value of the disk enclosure slot locate LED.
ses: Show disk's enc, enc device, slot, and fault/locate LED values."
script="${0##*/}"
if [ "$1" = "-h" ] ; then
echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2-
exit
fi
if [ "$script" = "ses" ] ; then
scripts='enc encdev slot fault_led locate_led'
else
scripts="$script"
fi
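# When called via a symlink (e.g. "slot"), only that one field is
# printed; when called as "ses", all fields are printed.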
for i in $scripts ; do
# shellcheck disable=SC2154
if [ -z "$VDEV_ENC_SYSFS_PATH" ] ; then
echo "$i="
continue
fi
val=""
case $i in
enc)
- val=$(ls "$VDEV_ENC_SYSFS_PATH/../../" 2>/dev/null)
+ if echo "$VDEV_ENC_SYSFS_PATH" | grep -q '/sys/bus/pci/slots' ; then
+ val="$VDEV_ENC_SYSFS_PATH"
+ else
+ val="$(ls """$VDEV_ENC_SYSFS_PATH/../../""" 2>/dev/null)"
+ fi
;;
slot)
- val=$(cat "$VDEV_ENC_SYSFS_PATH/slot" 2>/dev/null)
+ if echo "$VDEV_ENC_SYSFS_PATH" | grep -q '/sys/bus/pci/slots' ; then
+ val="$(basename """$VDEV_ENC_SYSFS_PATH""")"
+ else
+ val="$(cat """$VDEV_ENC_SYSFS_PATH/slot""" 2>/dev/null)"
+ fi
;;
encdev)
val=$(ls "$VDEV_ENC_SYSFS_PATH/../device/scsi_generic" 2>/dev/null)
;;
fault_led)
# A JBOD's fault LED is called 'fault'; an NVMe fault LED is called
# 'attention'.
if [ -f "$VDEV_ENC_SYSFS_PATH/fault" ] ; then
val=$(cat "$VDEV_ENC_SYSFS_PATH/fault" 2>/dev/null)
elif [ -f "$VDEV_ENC_SYSFS_PATH/attention" ] ; then
val=$(cat "$VDEV_ENC_SYSFS_PATH/attention" 2>/dev/null)
fi
;;
locate_led)
val=$(cat "$VDEV_ENC_SYSFS_PATH/locate" 2>/dev/null)
;;
*)
val=invalid
;;
esac
echo "$i=$val"
done
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
index 506b529dce48..ae2e9da9108d 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
@@ -1,707 +1,711 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <libzfs.h>
#include <libzutil.h>
#include <sys/zfs_context.h>
#include <sys/wait.h>
#include "zpool_util.h"
/*
* Private interface for iterating over pools specified on the command line.
* Most consumers will call for_each_pool, but in order to support iostat, we
* allow fine-grained control through the zpool_list_t interface.
*/
typedef struct zpool_node {
zpool_handle_t *zn_handle;
uu_avl_node_t zn_avlnode;
int zn_mark;
} zpool_node_t;
struct zpool_list {
boolean_t zl_findall;
boolean_t zl_literal;
uu_avl_t *zl_avl;
uu_avl_pool_t *zl_pool;
zprop_list_t **zl_proplist;
zfs_type_t zl_type;
};
static int
zpool_compare(const void *larg, const void *rarg, void *unused)
{
(void) unused;
zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
const char *lname = zpool_get_name(l);
const char *rname = zpool_get_name(r);
return (strcmp(lname, rname));
}
/*
* Callback function for pool_list_get(). Adds the given pool to the AVL tree
* of known pools.
*/
static int
add_pool(zpool_handle_t *zhp, void *data)
{
zpool_list_t *zlp = data;
zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
uu_avl_index_t idx;
node->zn_handle = zhp;
uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
if (zlp->zl_proplist &&
zpool_expand_proplist(zhp, zlp->zl_proplist,
zlp->zl_type, zlp->zl_literal) != 0) {
zpool_close(zhp);
free(node);
return (-1);
}
uu_avl_insert(zlp->zl_avl, node, idx);
} else {
zpool_close(zhp);
free(node);
return (-1);
}
return (0);
}
/*
* Create a list of pools based on the given arguments. If we're given no
* arguments, then iterate over all pools in the system and add them to the AVL
* tree. Otherwise, add only those pools explicitly specified on the command
* line.
*/
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
boolean_t literal, int *err)
{
zpool_list_t *zlp;
zlp = safe_malloc(sizeof (zpool_list_t));
zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);
if (zlp->zl_pool == NULL)
zpool_no_memory();
if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
UU_DEFAULT)) == NULL)
zpool_no_memory();
zlp->zl_proplist = proplist;
zlp->zl_type = type;
zlp->zl_literal = literal;
if (argc == 0) {
(void) zpool_iter(g_zfs, add_pool, zlp);
zlp->zl_findall = B_TRUE;
} else {
int i;
for (i = 0; i < argc; i++) {
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
NULL) {
if (add_pool(zhp, zlp) != 0)
*err = B_TRUE;
} else {
*err = B_TRUE;
}
}
}
return (zlp);
}
/*
* Search for any new pools, adding them to the list. We only add pools when no
* pool arguments were given on the command line. Otherwise, we keep the list
* fixed to those that were explicitly specified.
*/
void
pool_list_update(zpool_list_t *zlp)
{
if (zlp->zl_findall)
(void) zpool_iter(g_zfs, add_pool, zlp);
}
/*
* Iterate over all pools in the list, executing the callback for each
*/
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
void *data)
{
zpool_node_t *node, *next_node;
int ret = 0;
for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
next_node = uu_avl_next(zlp->zl_avl, node);
if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
unavail)
ret |= func(node->zn_handle, data);
}
return (ret);
}
/*
* Remove the given pool from the list. When running iostat, we want to remove
* those pools that no longer exist.
*/
void
pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
{
zpool_node_t search, *node;
search.zn_handle = zhp;
if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
uu_avl_remove(zlp->zl_avl, node);
zpool_close(node->zn_handle);
free(node);
}
}
/*
* Free all the handles associated with this list.
*/
void
pool_list_free(zpool_list_t *zlp)
{
uu_avl_walk_t *walk;
zpool_node_t *node;
if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
(void) fprintf(stderr,
gettext("internal error: out of memory"));
exit(1);
}
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(zlp->zl_avl, node);
zpool_close(node->zn_handle);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(zlp->zl_avl);
uu_avl_pool_destroy(zlp->zl_pool);
free(zlp);
}
/*
* Returns the number of elements in the pool list.
*/
int
pool_list_count(zpool_list_t *zlp)
{
return (uu_avl_numnodes(zlp->zl_avl));
}
/*
* High level function which iterates over all pools given on the command line,
* using the pool_list_* interfaces.
*/
int
for_each_pool(int argc, char **argv, boolean_t unavail,
zprop_list_t **proplist, zfs_type_t type, boolean_t literal,
zpool_iter_f func, void *data)
{
zpool_list_t *list;
int ret = 0;
if ((list = pool_list_get(argc, argv, proplist, type, literal,
&ret)) == NULL)
return (1);
if (pool_list_iter(list, unavail, func, data) != 0)
ret = 1;
pool_list_free(list);
return (ret);
}
/*
* This is the equivalent of for_each_pool() for vdevs. It iterates through
* all vdevs in the pool, ignoring root vdevs and holes, calling func() on
* each one.
*
* @zhp: Zpool handle
* @func: Function to call on each vdev
* @data: Custom data to pass to the function
*/
int
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
{
nvlist_t *config, *nvroot = NULL;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
}
return (for_each_vdev_cb((void *) zhp, nvroot, func, data));
}
/*
* Process the vcdl->vdev_cmd_data[] array to figure out all the unique column
* names and their widths. When this function is done, vcdl->uniq_cols,
* vcdl->uniq_cols_cnt, and vcdl->uniq_cols_width will be filled in.
*/
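/*
 * For example, if one vdev reported "vendor=ATA" and another reported
 * "vendor=ATA" and "size=1T", uniq_cols ends up as { "vendor", "size" }
 * and each column width is the maximum of the title and its values.
 */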
static void
process_unique_cmd_columns(vdev_cmd_data_list_t *vcdl)
{
char **uniq_cols = NULL, **tmp = NULL;
int *uniq_cols_width;
vdev_cmd_data_t *data;
int cnt = 0;
int k;
/* For each vdev */
for (int i = 0; i < vcdl->count; i++) {
data = &vcdl->data[i];
/* For each column the vdev reported */
for (int j = 0; j < data->cols_cnt; j++) {
/* Is this column in our list of unique column names? */
for (k = 0; k < cnt; k++) {
if (strcmp(data->cols[j], uniq_cols[k]) == 0)
break; /* yes it is */
}
if (k == cnt) {
/* No entry for column, add to list */
tmp = realloc(uniq_cols, sizeof (*uniq_cols) *
(cnt + 1));
if (tmp == NULL)
break; /* Nothing we can do... */
uniq_cols = tmp;
uniq_cols[cnt] = data->cols[j];
cnt++;
}
}
}
/*
* We now have a list of all the unique column names. Figure out the
* max width of each column by looking at the column name and all its
* values.
*/
uniq_cols_width = safe_malloc(sizeof (*uniq_cols_width) * cnt);
for (int i = 0; i < cnt; i++) {
/* Start off with the column title's width */
uniq_cols_width[i] = strlen(uniq_cols[i]);
/* For each vdev */
for (int j = 0; j < vcdl->count; j++) {
/* For each of the vdev's values in a column */
data = &vcdl->data[j];
for (k = 0; k < data->cols_cnt; k++) {
/* Does this vdev have a value for this col? */
if (strcmp(data->cols[k], uniq_cols[i]) == 0) {
/* Is the value width larger? */
uniq_cols_width[i] =
MAX(uniq_cols_width[i],
strlen(data->lines[k]));
}
}
}
}
vcdl->uniq_cols = uniq_cols;
vcdl->uniq_cols_cnt = cnt;
vcdl->uniq_cols_width = uniq_cols_width;
}
/*
* Process a line of command output
*
* When running 'zpool iostat|status -c' the lines of output can either be
* in the form of:
*
* column_name=value
*
* Or just:
*
* value
*
* Process the column_name (if any) and value.
*
* Returns 0 if the line was processed and more lines can still be
* processed.
*
* Returns 1 if this was the last line to process, or on error.
*/
static int
vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
{
char *col = NULL;
char *val = line;
char *equals;
char **tmp;
if (line == NULL)
return (1);
equals = strchr(line, '=');
if (equals != NULL) {
/*
* We have a 'column=value' type line. Split it into the
* column and value strings by turning the '=' into a '\0'.
*/
*equals = '\0';
col = line;
val = equals + 1;
} else {
val = line;
}
/* Do we already have a column by this name? If so, skip it. */
if (col != NULL) {
for (int i = 0; i < data->cols_cnt; i++) {
if (strcmp(col, data->cols[i]) == 0)
return (0); /* Duplicate, skip */
}
}
if (val != NULL) {
tmp = realloc(data->lines,
(data->lines_cnt + 1) * sizeof (*data->lines));
if (tmp == NULL)
return (1);
data->lines = tmp;
data->lines[data->lines_cnt] = strdup(val);
data->lines_cnt++;
}
if (col != NULL) {
tmp = realloc(data->cols,
(data->cols_cnt + 1) * sizeof (*data->cols));
if (tmp == NULL)
return (1);
data->cols = tmp;
data->cols[data->cols_cnt] = strdup(col);
data->cols_cnt++;
}
if (val != NULL && col == NULL)
return (1);
return (0);
}
/*
* Run the cmd and store results in *data.
*/
static void
vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
{
int rc;
char *argv[2] = {cmd};
char **env;
char **lines = NULL;
int lines_cnt = 0;
int i;
env = zpool_vdev_script_alloc_env(data->pool, data->path, data->upath,
data->vdev_enc_sysfs_path, NULL, NULL);
if (env == NULL)
goto out;
/* Run the command */
rc = libzfs_run_process_get_stdout_nopath(cmd, argv, env, &lines,
&lines_cnt);
zpool_vdev_script_free_env(env);
if (rc != 0)
goto out;
/* Process the output we got */
for (i = 0; i < lines_cnt; i++)
if (vdev_process_cmd_output(data, lines[i]) != 0)
break;
out:
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Generate the search path for zpool iostat/status -c scripts.
* The string returned must be freed.
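* When ZPOOL_SCRIPTS_PATH is set in the environment it is used verbatim;
* otherwise the path is "$HOME/.zpool.d" followed by the compiled-in
* ZPOOL_SCRIPTS_DIR (or ZPOOL_SCRIPTS_DIR alone if HOME is unset).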
*/
char *
zpool_get_cmd_search_path(void)
{
const char *env;
char *sp = NULL;
env = getenv("ZPOOL_SCRIPTS_PATH");
if (env != NULL)
return (strdup(env));
env = getenv("HOME");
if (env != NULL) {
if (asprintf(&sp, "%s/.zpool.d:%s",
env, ZPOOL_SCRIPTS_DIR) != -1) {
return (sp);
}
}
if (asprintf(&sp, "%s", ZPOOL_SCRIPTS_DIR) != -1)
return (sp);
return (NULL);
}
/* Thread function run for each vdev */
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
vdev_cmd_data_t *data = cb_cmd_data;
char *cmd = NULL, *cmddup, *cmdrest;
cmddup = strdup(data->cmd);
if (cmddup == NULL)
return;
cmdrest = cmddup;
while ((cmd = strtok_r(cmdrest, ",", &cmdrest))) {
char *dir = NULL, *sp, *sprest;
char fullpath[MAXPATHLEN];
if (strchr(cmd, '/') != NULL)
continue;
sp = zpool_get_cmd_search_path();
if (sp == NULL)
continue;
sprest = sp;
while ((dir = strtok_r(sprest, ":", &sprest))) {
if (snprintf(fullpath, sizeof (fullpath),
"%s/%s", dir, cmd) == -1)
continue;
if (access(fullpath, X_OK) == 0) {
vdev_run_cmd(data, fullpath);
break;
}
}
free(sp);
}
free(cmddup);
}
/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(void *zhp_data, nvlist_t *nv, void *cb_vcdl)
{
vdev_cmd_data_list_t *vcdl = cb_vcdl;
vdev_cmd_data_t *data;
const char *path = NULL;
char *vname = NULL;
const char *vdev_enc_sysfs_path = NULL;
int i, match = 0;
zpool_handle_t *zhp = zhp_data;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
return (1);
+ /* Make sure we're getting the updated enclosure sysfs path */
+ update_vdev_config_dev_sysfs_path(nv, path,
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
+
nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&vdev_enc_sysfs_path);
/* Spares show up more than once if they're in use, so skip duplicates */
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) == 0) &&
(strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
/* vdev already exists, skip it */
return (0);
}
}
/* Check for selected vdevs here, if any */
for (i = 0; i < vcdl->vdev_names_count; i++) {
vname = zpool_vdev_name(g_zfs, zhp, nv, vcdl->cb_name_flags);
if (strcmp(vcdl->vdev_names[i], vname) == 0) {
free(vname);
match = 1;
break; /* match */
}
free(vname);
}
/* If we selected vdevs, and this isn't one of them, then bail out */
if (!match && vcdl->vdev_names_count)
return (0);
/*
* Resize our array and add in the new element.
*/
if (!(vcdl->data = realloc(vcdl->data,
sizeof (*vcdl->data) * (vcdl->count + 1))))
return (ENOMEM); /* couldn't realloc */
data = &vcdl->data[vcdl->count];
data->pool = strdup(zpool_get_name(zhp));
data->path = strdup(path);
data->upath = zfs_get_underlying_path(path);
data->cmd = vcdl->cmd;
data->lines = data->cols = NULL;
data->lines_cnt = data->cols_cnt = 0;
if (vdev_enc_sysfs_path)
data->vdev_enc_sysfs_path = strdup(vdev_enc_sysfs_path);
else
data->vdev_enc_sysfs_path = NULL;
vcdl->count++;
return (0);
}
/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}
/*
* Now that vcdl is populated with our complete list of vdevs, spawn
* off the commands.
*/
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
tpool_t *t;
t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
if (t == NULL)
return;
/* Spawn off the command for each vdev */
for (int i = 0; i < vcdl->count; i++) {
(void) tpool_dispatch(t, vdev_run_cmd_thread,
(void *) &vcdl->data[i]);
}
/* Wait for threads to finish */
tpool_wait(t);
tpool_destroy(t);
}
/*
* Run command 'cmd' on all vdevs in all pools in argv. Saves the first line of
* output from the command in vcdl->data[].lines for all vdevs. If you want
* to run the command on only certain vdevs, fill in g_zfs, vdev_names,
* vdev_names_count, and cb_name_flags. Otherwise leave them as zero.
*
* Returns a vdev_cmd_data_list_t that must be freed with
* free_vdev_cmd_data_list().
*/
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
int cb_name_flags)
{
vdev_cmd_data_list_t *vcdl;
vcdl = safe_malloc(sizeof (vdev_cmd_data_list_t));
vcdl->cmd = cmd;
vcdl->vdev_names = vdev_names;
vcdl->vdev_names_count = vdev_names_count;
vcdl->cb_name_flags = cb_name_flags;
vcdl->g_zfs = g_zfs;
/* Gather our list of all vdevs in all pools */
for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, all_pools_for_each_vdev_gather_cb, vcdl);
/* Run command on all vdevs in all pools */
all_pools_for_each_vdev_run_vcdl(vcdl);
/*
* vcdl->data[] now contains all the column names and values for each
* vdev. We need to process that into a master list of unique column
* names, and figure out the width of each column.
*/
process_unique_cmd_columns(vcdl);
return (vcdl);
}
/*
* Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
*/
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
free(vcdl->uniq_cols);
free(vcdl->uniq_cols_width);
for (int i = 0; i < vcdl->count; i++) {
free(vcdl->data[i].path);
free(vcdl->data[i].pool);
free(vcdl->data[i].upath);
for (int j = 0; j < vcdl->data[i].lines_cnt; j++)
free(vcdl->data[i].lines[j]);
free(vcdl->data[i].lines);
for (int j = 0; j < vcdl->data[i].cols_cnt; j++)
free(vcdl->data[i].cols[j]);
free(vcdl->data[i].cols);
free(vcdl->data[i].vdev_enc_sysfs_path);
}
free(vcdl->data);
free(vcdl);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 5507f9d3fd67..69bf9649acf6 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,11237 +1,11468 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, Klara Inc.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static int zpool_do_help(int argc, char **argv);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostats"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
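/*
 * For example, IOS_HISTO_IDX(IOS_L_HISTO_M) == highbit64(1ULL << 3) - 1
 * == 3 == IOS_L_HISTO.
 */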
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
- return (gettext("\tclear [-nF] <pool> [device]\n"));
+ return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
- return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
+ return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
+ "<device> ...\n"));
case HELP_ONLINE:
- return (gettext("\tonline [-e] <pool> <device> ...\n"));
+ return (gettext("\tonline [--power][-e] <pool> <device> "
+ "...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
- return (gettext("\tstatus [-c [script1,script2,...]] "
+ return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
"[-igLpPstvxD] [-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool>\n"
"\tset <vdev_property=value> <pool> <vdev>\n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
default:
__builtin_unreachable();
}
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_pool_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Callback routine that will print out a vdev property value.
*/
static int
print_vdev_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
if (vdev_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (vdev_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", vdev_prop_values(prop));
return (ZPROP_CONT);
}
+/*
+ * Given a leaf vdev name like 'L5', return its ZPOOL_CONFIG_PATH like
+ * '/dev/disk/by-vdev/L5'.
+ */
+static const char *
+vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
+{
+ nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
+ if (vdev_nv == NULL) {
+ return (NULL);
+ }
+ return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
+}
+
+static int
+zpool_power_on(zpool_handle_t *zhp, char *vdev)
+{
+ return (zpool_power(zhp, vdev, B_TRUE));
+}
+
+static int
+zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
+{
+ int rc;
+
+ rc = zpool_power_on(zhp, vdev);
+ if (rc != 0)
+ return (rc);
+
+ zpool_disk_wait(vdev_name_to_path(zhp, vdev));
+
+ return (0);
+}
+
+static int
+zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
+{
+ nvlist_t *nv;
+ const char *path = NULL;
+ int rc;
+
+ /* Power up all the devices first */
+ FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
+ path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
+ if (path != NULL) {
+ rc = zpool_power_on(zhp, (char *)path);
+ if (rc != 0) {
+ return (rc);
+ }
+ }
+ }
+
+ /*
+ * Wait for their devices to show up. Since we powered them on
+ * at roughly the same time, they should all come online around
+ * the same time.
+ */
+ FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
+ path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
+ zpool_disk_wait(path);
+ }
+
+ return (0);
+}
+
+static int
+zpool_power_off(zpool_handle_t *zhp, char *vdev)
+{
+ return (zpool_power(zhp, vdev, B_FALSE));
+}
+
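+/*
+ * Example (illustrative, 'vdevname' is hypothetical): the --power
+ * paths added to commands such as 'zpool online' and 'zpool clear'
+ * can build on the helpers above, e.g. powering a slot on and
+ * waiting for its disk before bringing the vdev back online:
+ *
+ *	if (zpool_power_on_and_disk_wait(zhp, vdevname) != 0)
+ *		return (1);
+ */
+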
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp,
gettext("\nFor further help on a command or topic, "
"run: %s\n"), "zpool help [<topic>]");
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp, "%s",
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
if (current_prop_type == ZFS_TYPE_POOL) {
(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES "
"disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties "
"must be appended with a feature name.\n"
"See zpool-features(7).\n"));
} else if (current_prop_type == ZFS_TYPE_VDEV) {
(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
B_TRUE, current_prop_type);
}
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
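/*
 * Example (illustrative): with the environment variable set, e.g.
 *
 *	ZFS_ABORT=1 zpool help
 *
 * the process dumps core via abort() after printing the usage text,
 * which is handy for debugging.
 */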
/*
* zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -u Uninitialize. Clears initialization state.
* -w Wait. Blocks until initializing has completed.
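*
* Example (illustrative): 'zpool initialize -w tank' starts
* initialization on every leaf vdev in 'tank' and blocks until it
* completes; 'zpool initialize -s tank' suspends it.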
*/
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"uninit", no_argument, NULL, 'u'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csuw", long_options,
NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'u':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_UNINIT) {
(void) fprintf(stderr, gettext("-u cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_UNINIT;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c, -s"
"or -u\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
/*
* Print a pool vdev config for dry runs.
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
const char *class = "";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
const char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
(!zpool_prop_feature(propname) &&
!zpool_prop_vdev(propname))) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool or vdev property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* If version is specified, only "legacy" compatibility
* may be requested.
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
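/*
 * Illustrative use (mirrors the ashift handling in zpool_do_add()
 * below): pool property names are normalized via zpool_prop_to_name(),
 * and a repeated name is rejected unless the property is 'cachefile'.
 *
 *	if (add_prop_list(ZPOOL_CONFIG_ASHIFT, "12", &props, B_TRUE) != 0)
 *		return (1);
 */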
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, const char *propval,
nvlist_t **props)
{
const char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
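/* "ashift" is the only property that 'zpool add' accepts */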
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
zpool_close(zhp);
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
/*
* Return 1 if a vdev is active (being used in a pool).
* Return 0 if a vdev is inactive (offlined, faulted, or not in an active pool).
*
* This is useful for checking if a disk in an active pool is offlined or
* faulted.
*/
static int
vdev_is_active(char *vdev_path)
{
int fd;
fd = open(vdev_path, O_EXCL);
if (fd < 0) {
return (1); /* can't open with O_EXCL - disk is active */
}
close(fd);
return (0); /* disk is inactive in the pool */
}
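/*
 * Note: on Linux, open(2) with O_EXCL (and no O_CREAT) on a block
 * device fails with EBUSY while the device is held open, e.g. by an
 * imported pool, which is what makes this probe a usable activity test.
 */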
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for vdevs which are members of
* exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
(void) strlcpy(vdev, argv[0], sizeof (vdev));
/*
* If we cannot open an absolute path, we quit.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
if ((fd = open(vdev, O_RDWR)) < 0) {
int error;
if (vdev[0] == '/') {
(void) fprintf(stderr, gettext("failed to open "
"%s: %s\n"), vdev, strerror(errno));
return (1);
}
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
if (errno == ENOENT) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try "
"specifying absolute path instead\n"),
argv[0]);
return (1);
}
(void) fprintf(stderr, gettext("failed to open %s:"
" %s\n"), vdev, strerror(errno));
return (1);
}
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
/*
* The user may have force-offlined the disk with 'zpool offline -f';
* allow 'zpool labelclear -f' on such an inactive disk in an active
* pool. vdev_is_active() tells us whether the disk is still online.
*/
if (force && !vdev_is_active(vdev))
break;
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\""),
vdev, zpool_pool_state_to_name(state), name);
if (force) {
(void) fprintf(stderr, gettext(
". Offline the disk first to clear its label."));
}
printf("\n");
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
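/*
 * Any directory contains at least "." and "..", so more than
 * two entries means the mountpoint is not empty.
 */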
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
* through all the vdevs in the list and print out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in feature set.
* Note: if the compatibility property is not given, we'll have
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (i.e. no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
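*
* Example (illustrative): '-o feature@encryption=disabled' simply
* removes that nvpair from 'props', while a feature explicitly
* enabled but absent from the requested 'compatibility' set only
* triggers the warning below.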
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
const char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval,
ZFS_FEATURE_DISABLED) == 0) {
(void) nvlist_remove_all(props,
propname);
} else if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i]) {
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
}
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_share(pool, NULL);
zfs_commit_shares(NULL);
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one, &cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
static const char *const subtypes[] =
{ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
max = MAX(strlen(name) + depth, max);
free(name);
nvlist_t **child;
uint_t children;
for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
if (nvlist_lookup_nvlist_array(nv, subtypes[i],
&child, &children) == 0)
for (uint_t c = 0; c < children; ++c)
max = MAX(max_width(zhp, child[c], depth + 2,
max, name_flags), max);
return (max);
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
+ boolean_t cb_print_unhealthy;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
+ boolean_t cb_print_power;
} status_cbdata_t;
/* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
static boolean_t
is_blank_str(const char *str)
{
for (; str != NULL && *str != '\0'; ++str)
if (!isblank(*str))
return (B_FALSE);
return (B_TRUE);
}
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
{
vdev_cmd_data_t *data;
int i, j;
const char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
fputs(" ", stdout);
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
fputs(" ", stdout);
val = data->lines[j];
fputs(val ?: "", stdout);
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static const char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
+/*
+ * Called for each leaf vdev. Returns 0 if the vdev is healthy.
+ * A vdev is unhealthy if any of the following are true:
+ * 1) there are read, write, or checksum errors,
+ * 2) its state is not ONLINE, or
+ * 3) slow IO reporting was requested (-s) and there are slow IOs.
+ */
+static int
+vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
+{
+ status_cbdata_t *cb = data;
+ vdev_stat_t *vs;
+ uint_t vsc;
+ (void) hdl_data;
+
+ if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &vsc) != 0)
+ return (1);
+
+ if (vs->vs_checksum_errors || vs->vs_read_errors ||
+ vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
+ return (1);
+
+ if (cb->cb_print_slow_ios && vs->vs_slow_ios)
+ return (1);
+
+ return (0);
+}
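+
+/*
+ * A nonzero return from this callback for any vdev in the walk makes
+ * for_each_vdev_in_nvlist() return nonzero, which is how the '-e'
+ * pruning check in print_status_config() below detects an unhealthy
+ * subtree.
+ */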
+
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
const char *type;
const char *path = NULL;
- const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
+ const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
+ *scolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAIL' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
+ /*
+ * If '-e' is specified then top-level vdevs and their children
+ * can be pruned if all of their leaves are healthy.
+ */
+ if (cb->cb_print_unhealthy && depth > 0 &&
+ for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
+ return;
+ }
+
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
+ if (vs->vs_slow_ios)
+ scolor = ANSI_BLUE;
+
if (cb->cb_literal) {
fputc(' ', stdout);
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
fputc(' ', stdout);
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
fputc(' ', stdout);
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
fputc(' ', stdout);
printf_color(rcolor, "%5s", rbuf);
fputc(' ', stdout);
printf_color(wcolor, "%5s", wbuf);
fputc(' ', stdout);
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
- printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
+ printf_color(scolor, " %5llu",
+ (u_longlong_t)vs->vs_slow_ios);
else
- printf(" %5s", rbuf);
+ printf_color(scolor, " %5s", rbuf);
+ }
+ if (cb->cb_print_power) {
+ if (children == 0) {
+ /* Only leaf vdevs have physical slots */
+ switch (zpool_power_current_state(zhp, (char *)
+ fnvlist_lookup_string(nv,
+ ZPOOL_CONFIG_PATH))) {
+ case 0:
+ printf_color(ANSI_RED, " %5s",
+ gettext("off"));
+ break;
+ case 1:
+ printf(" %5s", gettext("on"));
+ break;
+ default:
+ printf(" %5s", "-");
+ }
+ } else {
+ printf(" %5s", "-");
+ }
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
if (vs->vs_scan_removing != 0) {
(void) printf(gettext(" (removing)"));
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
(void) printf(gettext(" (non-allocating)"));
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
/*
* If you force fault a drive that's resilvering, its scan stats can
* get frozen in time, giving the false impression that it's
* being resilvered. That's why we check the state to see if the vdev
* is healthy before reporting "resilvering" or "repairing".
*/
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
const char *type;
char *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
 * These are recorded as top-level vdevs in the main pool child array
 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
 * print_status_config() or print_import_config() to print the top-level
 * class vdevs; any of their children (e.g. mirrored slogs) are then
 * printed recursively, which works because only the top-level vdev is
 * marked.
*/
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
			bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
const char *name;
uint64_t guid;
uint64_t hostid = 0;
const char *msgid;
const char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
const char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices contains"
" corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n\t"
"(Note that they may be intentionally disabled "
"if the\n\t'compatibility' property is set.)\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"property.\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"requested by the 'compatibility' property.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric\n\tidentifier, "
"though the file(s) indicated by its "
"'compatibility'\n\tproperty cannot be parsed at "
"this time.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted snapshots and bookmarks contain "
"an on-disk\n\tincompatibility. This may "
"cause on-disk corruption if they are used"
"\n\twith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n\t"
"No additional action is needed if there "
"are no encrypted snapshots or\n\t"
"bookmarks. If preserving the encrypted "
"snapshots and bookmarks is\n\trequired, "
"use a non-raw send to backup and restore "
"them. Alternately,\n\tthey may be removed"
" to resolve the incompatibility.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%"PRIx64")\n\tbefore it "
"can be safely imported.\n"), hostname, hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
	/*
	 * If the state is "closed" or "can't open", and the aux state is
	 * "corrupt data", note that the pool may nevertheless be importable:
	 * with '-Df' if it was destroyed, or with '-f' if it may still be
	 * active on another system.
	 */
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL) {
(void) printf(gettext(
" see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
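/*
 * Determine whether importing this pool would require the force (-f)
 * flag: either the pool was not cleanly exported and was last written
 * by a different host, or MMP indicates it may still be imported
 * elsewhere.
 */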
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
/*
* The hostid on LOAD_INFO comes from the MOS label via
	 * spa_tryimport(). If it's not there then we're likely talking to an
* older kernel, so use the top one, which will be from the label
* discovered in zpool_find_import(), or if a cachefile is in use, the
* local hostid.
*/
if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
int ms_status = 0;
zpool_handle_t *zhp;
const char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%"PRIx64")\nExport the pool on the other "
"system, then run 'zpool import'.\n"),
name, hostname, hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
const char *hostname = "<unknown>";
time_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_HOSTNAME);
else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_HOSTID);
else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%"PRIx64") at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
hostid, ctime(&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS &&
zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
ret = 1;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY)) {
ms_status = zpool_enable_datasets(zhp, mntopts, 0);
if (ms_status == EZFS_SHAREFAILED) {
(void) fprintf(stderr, gettext("Import was "
"successful, but unable to share some datasets"));
} else if (ms_status == EZFS_MOUNTFAILED) {
(void) fprintf(stderr, gettext("Import was "
"successful, but unable to mount some datasets"));
}
}
zpool_close(zhp);
return (ret);
}
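/*
 * Walk the list of candidate configs returned by zpool_search_import()
 * and either display them (import discovery) or import them: all pools
 * with -a, or the single pool matching the requested name or GUID.
 */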
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) fputc('\n', stdout);
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
const char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
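/*
 * zpool_iter() callback: report whether an already-imported pool matches
 * the name or GUID we intend to import under.
 */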
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
const char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
 * Checkpoints the specified pool by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
 * -c	Read pool information from a cachefile instead of searching
 *	devices. If importing from the cachefile fails, then fall back
 *	to searching for devices only in the directories that exist in
 *	the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
 * -D	Scan for previously destroyed pools, or import all or only the
 *	specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
 *	vdevs in the FAULTED state. In other words, it does a verbatim
 *	import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
 * -T	Specify a starting txg to use for import. This is an
 *	intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
 * -s	Scan using the default search path; the libblkid cache will
 *	not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
 * The import command scans for pools to import, and imports pools based on
 * pool name or GUID. The pool can also be renamed as part of the import
 * process.
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
	 * -D	The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
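	/* The value is a colon-separated list of directories to search. */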
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir, *tmp = NULL;
envdup = strdup(env);
for (dir = strtok_r(envdup, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp)) {
searchdirs = safe_realloc(searchdirs,
(nsearch + 1) * sizeof (char *));
searchdirs[nsearch++] = dir;
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
libpc_handle_t lpch = {
.lpc_lib_handle = g_zfs,
.lpc_ops = &libzfs_config_ops,
.lpc_printerr = B_TRUE
};
pools = zpool_search_import(&lpch, &idata);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
free(searchdirs);
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
	 * fall back to scanning the directories for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
		 * in the cachefile. If we need to fall back to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(&lpch, &idata);
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
free(searchdirs);
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
 */
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_sync_one, &force);
return (ret);
}
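/*
 * Per-invocation state for 'zpool iostat', shared by the iostat
 * callbacks below.
 */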
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_namewidth;
int cb_iteration;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
vdev_cbdata_t cb_vdevs;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
{NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {"rebuild", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
{NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
{NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
 */
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
 * const char *foo[] = {"bar", "baz", NULL};
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
 * Print the column labels, i.e.:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified the "zpool status|iostat -c" then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
color_start(ANSI_BOLD);
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdevs.cb_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
color_end();
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
 * Prints a size string (e.g. 120M) with the suffix ("M") colored
* by order of magnitude. Uses column_size to add padding.
*/
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
fputs(" ", stdout);
size_t len = strlen(statbuf);
while (len < column_size) {
fputc(' ', stdout);
column_size--;
}
if (*statbuf == '0') {
color_start(ANSI_GRAY);
fputc('0', stdout);
} else {
for (; *statbuf; statbuf++) {
if (*statbuf == 'K') color_start(ANSI_GREEN);
else if (*statbuf == 'M') color_start(ANSI_YELLOW);
else if (*statbuf == 'G') color_start(ANSI_RED);
else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
else if (*statbuf == 'E') color_start(ANSI_CYAN);
fputc(*statbuf, stdout);
if (--column_size <= 0)
break;
}
}
color_end();
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
print_stat_color(buf, column_size);
}
/*
* Calculate the default vdev stats
*
* Subtract oldvs from newvs, apply a scaling factor, and save the resulting
* stats into calcvs.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
 * to process. In order to make a single uint64_t look like an array, we
 * store the value in __data and point data at it, with count = 1. Then
 * we can just use data and count uniformly.
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
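/*
 * Return the largest single value found across an array of stat_arrays;
 * used to size histogram columns when printing literal values.
 */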
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
 * Helper function to look up a uint64_t array or uint64_t value and store
 * its data as a stat_array. If the nvpair is a single uint64_t value, we
 * make it look like a one-element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
 * You must free the returned array with free_calc_stats() when you are
 * done with it.
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
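/*
 * Print the body of a histogram: one row per bucket, labeled with the
 * bucket's upper latency bound (or starting request size), followed by
 * the scaled per-stat counts for that bucket.
 */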
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
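/*
 * Print all the histograms selected in cb_flags for one vdev, including
 * the per-vdev header and a trailing separator line.
 */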
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
*/
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
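/*
 * Print the pending/active queue depths for one vdev. Queue depths are
 * instantaneous values, so only newnv is consulted; no old/new delta is
 * taken.
 */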
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
{
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (int i = 0; i < ARRAY_SIZE(names); i++) {
uint64_t val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
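/*
 * Print the average latencies for one vdev, computed from the deltas of
 * the old and new latency histograms.
 */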
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *const class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
 * Print out all the statistics for the given vdev. This can either be
 * called for the toplevel configuration or recursively for its children.
 * If 'name' is NULL, then this is verbose output, and we don't want to
 * display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
	 * Print the vdev name unless we're printing a histogram. Histograms
	 * display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
const char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
const char *bias = NULL;
const char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdevs.cb_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_vdevs.cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose &&
!cb->cb_vdevs.cb_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
size_t poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the input string and extract the 'interval' and 'count' values,
* if present.
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
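/*
 * Example: "zpool iostat tank 5 10" reaches this function with argv
 * ending in ["5", "10"]. The first pass parses "10" as the interval;
 * the second pass shifts that value into the count and re-parses "5"
 * as the interval, matching the documented "[interval [count]]" order.
 */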
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return the stat flags that are supported on all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness... */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
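/*
 * Example: if pool A supports IOS_DEFAULT_M | IOS_LATENCY_M |
 * IOS_QUEUES_M but pool B only reports IOS_DEFAULT_M | IOS_LATENCY_M,
 * the mask starts at all ones and is ANDed down to
 * IOS_DEFAULT_M | IOS_LATENCY_M, the set supported everywhere.
 */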
/*
* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
uint64_t guid;
vdev_cbdata_t *cb = cb_data;
zpool_handle_t *zhp = zhp_data;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (0);
return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
}
/*
* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_names for a second... */
tmp_name = cb->cb_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_names = tmp_name;
return (ret);
}
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
is_pool_cb, name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
vdev_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_vdevs.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
&cb->cb_vdevs)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
&cb->cb_vdevs)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
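/*
 * Example (hypothetical guid): for "zpool iostat -g 7593428790 2",
 * "2" does not resolve to a vdev and is treated as a candidate
 * interval, while "7593428790" resolves to a vdev guid and is left in
 * place rather than being misread as a count.
 */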
-/*
- * Floating point sleep(). Allows you to pass in a floating point value for
- * seconds.
- */
-static void
-fsleep(float sec)
-{
- struct timespec req;
- req.tv_sec = floor(sec);
- req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
- nanosleep(&req, NULL);
-}
-
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, (char *)"-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
dirpath, ent->d_name) >= sizeof (fullpath)) {
(void) fprintf(stderr,
gettext("internal error: "
"ZPOOL_SCRIPTS_PATH too large.\n"));
exit(1);
}
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(const char *subcommand)
{
char *dir, *sp, *tmp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
for (dir = strtok_r(sp, ":", &tmp);
dir != NULL;
dir = strtok_r(NULL, ":", &tmp))
print_zpool_dir_scripts(dir);
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
* but may be as large as the terminal width minus 42, so the name plus stats
* still fit on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of whatever the screen width is (get_columns can
* return 0 if the width is not known or less than 42 for a narrow
* terminal) have the width be a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
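/*
 * Example: on a 120 column terminal, available_width is 120 - 42 = 78.
 * A longest name of 30 columns yields a name column of 30; a longest
 * name of 90 columns is clamped to 78, so only the over-long lines wrap.
 */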
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
/* All the args are vdevs */
cb.cb_vdevs.cb_names = argv;
cb.cb_vdevs.cb_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
/* ...and the rest are vdev names */
cb.cb_vdevs.cb_names = argv + 1;
cb.cb_vdevs.cb_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdevs.cb_names_count != 0) {
/*
* If user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
&ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
cb.cb_vdevs.cb_names_count,
cb.cb_vdevs.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
winheight = terminal_height();
/*
* Are we connected to TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* If it's the first time and we're not skipping it,
* or skip and verbose differ (exactly one is set), print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdevs.cb_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) fputs(" ", stdout);
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) fputs(header, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) fputc('\n', stdout);
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout. Used by zpool_do_list().
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
const char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) fputc('\t', stdout);
else
(void) fputs(" ", stdout);
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else if (zfs_prop_user(pl->pl_user_prop) &&
zpool_get_userprop(zhp, pl->pl_user_prop, property,
sizeof (property), NULL) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) fputs(propstr, stdout);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) fputc('\n', stdout);
}
static void
print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
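/*
 * Example for ZPOOL_PROP_CAPACITY above: a pool that is 82.34% full
 * arrives as the permyriad value 8234 and prints as "82.3%"; the value
 * 123 prints as "1.23%"; with ZFS_NICENUM_RAW it prints 8234 / 100 = 82.
 */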
/*
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
const char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to the print_one_column()
* to indicate that the value is valid.
*/
if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
scripted, B_TRUE, format);
else
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format);
(void) fputc('\n', stdout);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
free(vname);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
const char *bias = NULL;
const char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
print_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
width = get_namewidth(zhp, cb->cb_namewidth,
cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
current_prop_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* Unless manually specified, use the "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top-level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
int ms_status = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
ms_status = zpool_enable_datasets(zhp, mntopts, 0);
if (ms_status == EZFS_SHAREFAILED) {
(void) fprintf(stderr, gettext("Split was successful, "
"datasets are mounted but sharing of some datasets "
"has failed\n"));
} else if (ms_status == EZFS_MOUNTFAILED) {
(void) fprintf(stderr, gettext("Split was successful"
", but some datasets could not be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
-
+#define POWER_OPT 1024
/*
- * zpool online <pool> <device> ...
+ * zpool online [--power] <pool> <device> ...
+ *
+ * --power: Power on the enclosure slot to the drive (if possible)
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
+ boolean_t is_power_on = B_FALSE;
+ struct option long_options[] = {
+ {"power", no_argument, NULL, POWER_OPT},
+ {0, 0, 0, 0}
+ };
/* check options */
- while ((c = getopt(argc, argv, "e")) != -1) {
+ while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
+ case POWER_OPT:
+ is_power_on = B_TRUE;
+ break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
+ if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
+ is_power_on = B_TRUE;
+
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
vdev_state_t oldstate;
boolean_t avail_spare, l2cache;
+ int rc;
+
+ if (is_power_on) {
+ rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
+ if (rc == ENOTSUP) {
+ (void) fprintf(stderr,
+ gettext("Power control not supported\n"));
+ }
+ if (rc != 0)
+ return (rc);
+ }
+
nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
&l2cache, NULL);
if (tgt == NULL) {
ret = 1;
continue;
}
uint_t vsc;
oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
if ((flags & ZFS_ONLINE_EXPAND)) {
(void) printf(gettext("%s: failed "
"to expand usable space on "
"unhealthy device '%s'\n"),
(oldstate >= VDEV_STATE_DEGRADED ?
"error" : "warning"), argv[i]);
if (oldstate >= VDEV_STATE_DEGRADED) {
ret = 1;
break;
}
}
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
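/*
 * Example: "zpool online --power tank sda" (hypothetical device name)
 * first powers on the enclosure slot for sda and waits for the disk,
 * then onlines the vdev. Setting ZPOOL_AUTO_POWER_ON_SLOT in the
 * environment implies --power for every invocation.
 */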
/*
- * zpool offline [-ft] <pool> <device> ...
+ * zpool offline [-ft]|[--power] <pool> <device> ...
+ *
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
+ *
+ * --power Power off the enclosure slot to the drive (if possible)
*/
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
+ boolean_t is_power_off = B_FALSE;
+
+ struct option long_options[] = {
+ {"power", no_argument, NULL, POWER_OPT},
+ {0, 0, 0, 0}
+ };
/* check options */
- while ((c = getopt(argc, argv, "ft")) != -1) {
+ while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
+ case POWER_OPT:
+ is_power_off = B_TRUE;
+ break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
+ if (is_power_off && fault) {
+ (void) fprintf(stderr,
+ gettext("-0 and -f cannot be used together\n"));
+ usage(B_FALSE);
+ return (1);
+ }
+
+ if (is_power_off && istmp) {
+ (void) fprintf(stderr,
+ gettext("-0 and -t cannot be used together\n"));
+ usage(B_FALSE);
+ return (1);
+ }
+
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
- if (fault) {
- uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
+ uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
+ if (is_power_off) {
+ /*
+ * Note: we have to power off first, then set REMOVED,
+ * or else zpool_vdev_set_removed_state() returns
+ * EAGAIN.
+ */
+ ret = zpool_power_off(zhp, argv[i]);
+ if (ret != 0) {
+ (void) fprintf(stderr, "%s %s %d\n",
+ gettext("unable to power off slot for"),
+ argv[i], ret);
+ }
+ zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
+
+ } else if (fault) {
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
- * zpool clear <pool> [device]
+ * zpool clear [-nF]|[--power] <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
+ boolean_t is_power_on = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
+ struct option long_options[] = {
+ {"power", no_argument, NULL, POWER_OPT},
+ {0, 0, 0, 0}
+ };
+
/* check options */
- while ((c = getopt(argc, argv, "FnX")) != -1) {
+ while ((c = getopt_long(argc, argv, "FnX", long_options,
+ NULL)) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
+ case POWER_OPT:
+ is_power_on = B_TRUE;
+ break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
+ if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
+ is_power_on = B_TRUE;
+
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
+ if (is_power_on) {
+ if (device == NULL) {
+ zpool_power_on_pool_and_wait_for_devices(zhp);
+ } else {
+ zpool_power_on_and_disk_wait(zhp, device);
+ }
+ }
+
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
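/*
 * Example: "zpool clear --power tank" powers on the slot of every
 * device in the pool and waits for the disks before clearing errors;
 * "zpool clear --power tank sda" (hypothetical device name) powers on
 * just that one slot first.
 */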
/*
* zpool reguid <pool>
*/
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen <pool>
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_reopen_one, &scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
return (err != 0);
}
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-s | -p] [-w] [-e] <pool> ...
*
* -e Only scrub blocks in the error log.
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pause in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
boolean_t is_error_scrub = B_FALSE;
boolean_t is_pause = B_FALSE;
boolean_t is_stop = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "spwe")) != -1) {
switch (c) {
case 'e':
is_error_scrub = B_TRUE;
break;
case 's':
is_stop = B_TRUE;
break;
case 'p':
is_pause = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (is_pause && is_stop) {
(void) fprintf(stderr, gettext("invalid option "
"combination :-s and -p are mutually exclusive\n"));
usage(B_FALSE);
} else {
if (is_error_scrub)
cb.cb_type = POOL_SCAN_ERRORSCRUB;
if (is_pause) {
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
} else if (is_stop) {
cb.cb_type = POOL_SCAN_NONE;
} else {
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
}
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, wait_callback, &act);
}
return (error);
}
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver
*/
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, scrub_callback, &cb));
}
/*
* zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
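*
* For example, with a hypothetical pool named 'tank':
*	zpool trim -r 100M tank	# trim all leaf vdevs at 100M per second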
*/
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
(void) fprintf(stderr, "%s: %s\n",
gettext("invalid value for rate"),
libzfs_error_description(g_zfs));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
/*
* Converts a total number of seconds to a human-readable string broken
* down into days/hours/minutes/seconds.
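* For example, 90061 seconds formats as "1 days 01:01:01", and 3661
* seconds as "01:01:01".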
*/
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
/*
* Print out detailed error scrub status.
*/
static void
print_err_scrub_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t total_secs_left;
uint64_t secs_left, mins_left, hours_left, days_left;
uint64_t examined, to_be_examined;
if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
return;
}
(void) printf(gettext(" scrub: "));
start = ps->pss_error_scrub_start;
end = ps->pss_error_scrub_end;
pause = ps->pss_pass_error_scrub_pause;
examined = ps->pss_error_scrub_examined;
to_be_examined = ps->pss_error_scrub_to_be_examined;
assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
if (ps->pss_error_scrub_state == DSS_FINISHED) {
total_secs_left = end - start;
days_left = total_secs_left / 60 / 60 / 24;
hours_left = (total_secs_left / 60 / 60) % 24;
mins_left = (total_secs_left / 60) % 60;
secs_left = (total_secs_left % 60);
(void) printf(gettext("scrubbed %llu error blocks in %llu days "
"%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left,
ctime(&end));
return;
} else if (ps->pss_error_scrub_state == DSS_CANCELED) {
(void) printf(gettext("error scrub canceled on %s"),
ctime(&end));
return;
}
assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
/* Error scrub is in progress. */
if (pause == 0) {
(void) printf(gettext("error scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("error scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\terror scrub started on %s"),
ctime(&start));
}
double fraction_done = (double)examined / (to_be_examined + examined);
(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
" blocks"), 100 * fraction_done, (u_longlong_t)examined);
(void) printf("\n");
}
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
assert(is_resilver || is_scrub);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (is_scrub) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (is_scrub) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (is_resilver) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (is_scrub) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (is_resilver) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total_s = ps->pss_to_examine;
total_i = ps->pss_to_examine - ps->pss_skipped;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total_i;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
/* do not print estimated time if we have a paused scrub */
(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
if (pause == 0 && scan_rate > 0) {
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
(void) printf(gettext(" at %s/s"), srate_buf);
}
(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
if (pause == 0 && issue_rate > 0) {
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
(void) printf(gettext(" at %s/s"), irate_buf);
}
(void) printf(gettext("\n"));
if (is_resilver) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (is_scrub) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
/*
* Only provide an estimate iff:
* 1) we haven't yet issued all we expected, and
* 2) the issue rate exceeds 10 MB/s, and
* 3) it's either:
* a) a resilver which has started repairs, or
* b) a scrub which has entered the issue phase.
*/
if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
((is_resilver && ps->pss_processed > 0) ||
(is_scrub && issued > 0))) {
secs_to_dhms((total_i - issued) / issue_rate, time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est_s = vrs->vrs_bytes_est;
uint64_t bytes_est_i = vrs->vrs_bytes_est;
if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
bytes_est_i -= vrs->vrs_pass_bytes_skipped;
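/* Per-pass rates in bytes/sec; the +1 ms avoids division by zero. */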
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est_s + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
bytes_est_s_buf);
if (scan_rate > 0) {
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
(void) printf(gettext(" at %s/s"), scan_rate_buf);
}
(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
bytes_est_i_buf);
if (issue_rate > 0) {
zfs_nicebytes(issue_rate, issue_rate_buf,
sizeof (issue_rate_buf));
(void) printf(gettext(" at %s/s"), issue_rate_buf);
}
(void) printf(gettext("\n"));
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (bytes_est_s >= bytes_scanned &&
scan_rate >= 10 * 1024 * 1024) {
secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
time_buf);
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, i, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential (rather than healing) reconstruction
* was performed, the blocks were reconstructed; however, their checksums
* have not been verified, so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or canceled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t have_errorscrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
time_t scrub_start = 0, errorscrub_start = 0;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
scrub_start = ps->pss_start_time;
if (c > offsetof(pool_scan_stat_t,
pss_pass_error_scrub_pause) / 8) {
have_errorscrub = (ps->pss_error_scrub_func ==
POOL_SCAN_ERRORSCRUB);
errorscrub_start = ps->pss_error_scrub_start;
}
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub && scrub_start > errorscrub_start)
print_scan_scrub_resilver_status(ps);
else if (have_errorscrub && errorscrub_start >= scrub_start)
print_err_scrub_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, mins_left, hours_left;
double fraction_done;
uint_t rate;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print the estimated time if more than
* 30 days remain
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
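* For example, "Sat Jan  1 00:00:00 2022\n" becomes
* "Sat Jan  1 00:00:00 2022".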
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
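/*
* Prints the pool's list of files with permanent errors, resolving
* each <dataset, object> pair in the error log to a pathname.
*/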
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status, such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, then we print out error rate information as well.
*/
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
const char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
fputc(' ', stdout);
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
fputc('\n', stdout);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
(void) printf(gettext("status: One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
(void) printf(gettext("action: Replace affected devices with "
"devices that support the\n\tconfigured block size, or "
"migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted snapshots "
"and bookmarks contain an on-disk\n\tincompat"
"ibility. This may cause on-disk corruption if "
"they are used\n\twith 'zfs recv'.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_checkpoint_stat_t *pcs = NULL;
pool_removal_stat_t *prs = NULL;
print_scan_status(zhp, nvroot);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
+ if (cbp->cb_print_power) {
+ printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
+ }
+
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
(void) printf("\n");
if (nerr == 0) {
(void) printf(gettext(
"errors: No known data errors\n"));
} else if (!cbp->cb_verbose) {
+ color_start(ANSI_RED);
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
+ color_end();
} else {
print_error_log(zhp);
}
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
- * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
- * [interval [count]]
+ * zpool status [-c [script1,script2,...]] [-eigLpPstvx] [--power] [-T d|u] ...
+ * [pool] [interval [count]]
*
* -c CMD For each vdev, run command CMD
+ * -e Display only unhealthy vdevs.
* -i Display vdev initialization status.
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -v Display complete error logs
* -x Display only pools with potential problems
* -D Display dedup status (undocumented)
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
+ * --power Display vdev enclosure slot power status
*
* Describes the health status of all pools or some subset.
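*
* For example, with a hypothetical pool named 'tank':
*	zpool status -v --power tank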
*/
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
+ struct option long_options[] = {
+ {"power", no_argument, NULL, POWER_OPT},
+ {0, 0, 0, 0}
+ };
+
/* check options */
- while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
+ while ((c = getopt_long(argc, argv, "c:eigLpPsvxDtT:", long_options,
+ NULL)) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
+ case 'e':
+ cb.cb_print_unhealthy = B_TRUE;
+ break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
+ case POWER_OPT:
+ cb.cb_print_power = B_TRUE;
+ break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
cb.cb_literal, status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
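/*
* Counts (via 'unsupp_fs') the filesystems whose ZPL version is newer
* than this implementation supports, recursing into descendants.
*/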
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
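/*
* Upgrades the pool to the given SPA version. Refuses if any filesystem
* uses an unsupported ZPL version or if the pool's 'compatibility'
* property is set to 'legacy'.
*/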
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
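/*
* Enables every supported feature permitted by the pool's
* 'compatibility' property that is not already enabled; the number of
* newly enabled features is returned via 'countp'.
*/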
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
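*
* For example, 'zpool upgrade -a' upgrades every imported pool to the
* latest supported version and enables all supported features.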
*/
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf("%s", gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, upgrade_one, &cb);
}
return (ret);
}
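/*
 * A minimal sketch of the strtoll(3) end-pointer validation used by the -V
 * handler above. parse_version() is hypothetical and shown only for
 * illustration; the real code additionally checks the result with
 * SPA_VERSION_IS_SUPPORTED().
 */
static int
parse_version(const char *arg, uint64_t *versionp)
{
	char *end;
	long long v = strtoll(arg, &end, 10);

	/* reject empty input, trailing junk, and non-positive values */
	if (arg[0] == '\0' || *end != '\0' || v <= 0)
		return (-1);
	*versionp = (uint64_t)v;
	return (0);
}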
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
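/*
 * A minimal sketch of the thread-safe localtime_r(3) + strftime(3)
 * timestamp formatting used by print_history_records() above.
 * format_hist_time() is hypothetical, shown only for illustration;
 * "%F.%T" expands to "YYYY-MM-DD.hh:mm:ss".
 */
static void
format_hist_time(time_t tsec, char *tbuf, size_t tbuflen)
{
	struct tm t;

	(void) localtime_r(&tsec, &t);
	(void) strftime(tbuf, tbuflen, "%F.%T", &t);
}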
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
B_FALSE, get_history_one, &cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32];
const char *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
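/*
 * A minimal sketch of the fixed-offset ctime_r(3) slicing performed above.
 * format_event_time() is hypothetical; it assumes the standard 26-byte
 * "Www Mmm dd hh:mm:ss yyyy\n" ctime layout and a destination buffer of at
 * least 32 bytes.
 */
static void
format_event_time(time_t sec, long nsec, char *buf)
{
	char ctime_str[26];

	(void) ctime_r(&sec, ctime_str);
	(void) memset(buf, ' ', 32);
	(void) memcpy(buf, ctime_str + 4, 6);		/* 'Jun 30' */
	(void) memcpy(buf + 7, ctime_str + 20, 4);	/* '1993' */
	(void) memcpy(buf + 12, ctime_str + 11, 8);	/* '21:49:08' */
	(void) snprintf(buf + 20, 12, ".%09ld", nsec);	/* '.123456789' */
}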
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
const char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
const char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
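/*
 * A minimal skeleton of the recursive nvlist walk implemented above.
 * walk_nvlist() is hypothetical and prints only pair names; the real
 * function above switches on every nvpair data type.
 */
static void
walk_nvlist(nvlist_t *nvl, int depth)
{
	for (nvpair_t *nvp = nvlist_next_nvpair(nvl, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(nvl, nvp)) {
		(void) printf("%*s%s\n", depth, "", nvpair_name(nvp));
		if (nvpair_type(nvp) == DATA_TYPE_NVLIST) {
			nvlist_t *cnv;
			/* recurse into embedded lists, indenting further */
			(void) nvpair_value_nvlist(nvp, &cnv);
			walk_nvlist(cnv, depth + 8);
		}
	}
}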
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
const char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(void)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays event logs generated by ZFS.
*/
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear();
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
pl = pl->pl_next) {
char *prop_name;
/*
* If the first property is the pool name, it is a special
* placeholder that we can skip. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL) {
prop_name = pl->pl_user_prop;
} else {
prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
}
if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
prop_name, value, sizeof (value), &srctype,
cbp->cb_literal) == 0) {
zprop_print_one_property(vdevname, cbp, prop_name,
value, srctype, NULL, NULL);
}
}
return (0);
}
static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
zpool_handle_t *zhp = zhp_data;
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char *vdevname;
const char *type;
int ret;
/*
* zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
* pool name for display purposes, which is not desired. Fall back to
* zpool_vdev_name() when not dealing with the root vdev.
*/
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
vdevname = strdup("root-0");
else
vdevname = zpool_vdev_name(g_zfs, zhp, nv,
cbp->cb_vdevs.cb_name_flags);
(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
ret = get_callback_vdev(zhp, vdevname, data);
free(vdevname);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[ZFS_MAXPROPLEN];
zprop_source_t srctype;
zprop_list_t *pl;
int vid;
if (cbp->cb_type == ZFS_TYPE_VDEV) {
if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
for_each_vdev(zhp, get_callback_vdev_cb, data);
} else {
/* Adjust column widths for vdev properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
vdev_expand_proplist(zhp,
cbp->cb_vdevs.cb_names[vid],
&cbp->cb_proplist);
}
/* Display the properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
get_callback_vdev(zhp,
cbp->cb_vdevs.cb_names[vid], data);
}
}
} else {
assert(cbp->cb_type == ZFS_TYPE_POOL);
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also
* skip over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
zfs_prop_user(pl->pl_user_prop)) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_get_userprop(zhp, pl->pl_user_prop,
value, sizeof (value), &srctype) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp),
cbp, pl->pl_user_prop, value, srctype,
NULL, NULL);
} else if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp,
pl->pl_user_prop, value,
sizeof (value)) == 0) {
zprop_print_one_property(
zpool_get_name(zhp), cbp,
pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype,
cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp),
cbp, zpool_prop_to_name(pl->pl_prop),
value, srctype, NULL, NULL);
}
}
}
return (0);
}
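/*
 * A minimal sketch of the "fake name" list-head trick used by zpool_do_get()
 * below: a placeholder entry for the NAME column is prepended before
 * iterating pools and unlinked before the real list is freed.
 * prepend_fake_name() is hypothetical, shown only for illustration.
 */
static void
prepend_fake_name(zprop_list_t **listp, zprop_list_t *fake)
{
	fake->pl_prop = ZPOOL_PROP_NAME;
	fake->pl_width = strlen(gettext("NAME"));
	fake->pl_next = *listp;
	*listp = fake;
}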
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *propstr = NULL;
char *vdev = NULL;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
current_prop_type = cb.cb_type;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] =
{ "name", "property", "value", "source",
"all" };
static const zfs_get_column_t col_cols[] =
{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
GET_COL_SOURCE };
if (i == ZFS_GET_NCOLS - 1) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
if (strcmp(tok, col_opts[c]) == 0)
goto found;
(void) fprintf(stderr,
gettext("invalid column name '%s'\n"), tok);
usage(B_FALSE);
found:
if (c >= 4) {
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
memcpy(cb.cb_columns, col_cols,
sizeof (col_cols));
i = ZFS_GET_NCOLS - 1;
} else
cb.cb_columns[i++] = col_cols[c];
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
/* Properties list is needed later by zprop_get_list() */
propstr = argv[0];
argc--;
argv++;
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
(argc == 2 && strcmp(argv[1], "root") == 0) ||
are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
&cb.cb_vdevs)) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
/* ... and the rest are vdev names */
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = argc - 1;
cb.cb_type = ZFS_TYPE_VDEV;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected a list of vdevs in"
" \"%s\", but got:\n"), argv[0]);
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The first arg isn't a pool name.
*/
fprintf(stderr, gettext("missing pool name.\n"));
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
cb.cb_type) != 0) {
/* Use correct list of valid properties (pool or vdev) */
current_prop_type = cb.cb_type;
usage(B_FALSE);
}
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
cb.cb_literal, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
if (vdev != NULL)
free(vdev);
return (ret);
}
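/*
 * A minimal sketch of the strsep(3) comma-list parsing pattern used by the
 * -o handler above. parse_columns() and its option table are hypothetical;
 * note that strsep() consumes the string in place, so callers must pass a
 * writable copy.
 */
static int
parse_columns(char *list)
{
	static const char *const opts[] =
	    { "name", "property", "value", "source" };
	for (char *tok; (tok = strsep(&list, ",")) != NULL; ) {
		size_t c;
		for (c = 0; c < ARRAY_SIZE(opts); c++)
			if (strcmp(tok, opts[c]) == 0)
				break;
		if (c == ARRAY_SIZE(opts)) {
			(void) fprintf(stderr, "invalid column '%s'\n", tok);
			return (-1);
		}
	}
	return (0);
}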
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
boolean_t cb_any_successful;
} set_cbdata_t;
static int
set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
{
int error;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check it's in compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
return (error);
}
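/*
 * A minimal sketch of extracting the feature name from a "feature@name"
 * property string, as done above. feature_name_of() is hypothetical;
 * callers above have already verified the property via zpool_prop_feature(),
 * so the '@' is known to be present there.
 */
static const char *
feature_name_of(const char *propname)
{
	const char *at = strchr(propname, '@');

	return (at == NULL ? NULL : at + 1);
}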
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
if (cb->cb_type == ZFS_TYPE_VDEV) {
error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
cb->cb_propname, cb->cb_value);
} else {
assert(cb->cb_type == ZFS_TYPE_POOL);
error = set_pool_callback(zhp, cb);
}
cb->cb_any_successful = !error;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
char *vdev = NULL;
current_prop_type = ZFS_TYPE_POOL;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 4) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_type = ZFS_TYPE_POOL;
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
argc -= 2;
argv += 2;
/* argv[0] is pool name */
if (!is_pool(argv[0])) {
(void) fprintf(stderr,
gettext("cannot open '%s': is not a pool\n"), argv[0]);
return (EINVAL);
}
/* argv[1], when supplied, is vdev name */
if (argc == 2) {
if (strcmp(argv[1], "root") == 0)
vdev = strdup("root-0");
else
vdev = strdup(argv[1]);
if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
(void) fprintf(stderr, gettext(
"cannot find '%s' in '%s': device not in pool\n"),
vdev, argv[0]);
free(vdev);
return (EINVAL);
}
cb.cb_vdevs.cb_names = &vdev;
cb.cb_vdevs.cb_names_count = 1;
cb.cb_type = ZFS_TYPE_VDEV;
}
error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, set_callback, &cb);
if (vdev != NULL)
free(vdev);
return (error);
}
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
const char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line, containing one column for each activity that
* we are waiting for, specifying how many bytes of work are left for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
+ if (timestamp_fmt != NODATE)
+ print_timestamp(timestamp_fmt);
+
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) fputc('\n', stdout);
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to say that if we have any 'spare' or
* 'replacing' vdevs and a resilver is happening, then a replace is in
* progress, like we do here. When a hot spare is used, the faulted vdev
* is not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. So we could have a 'spare' vdev, but be
* resilvering for a different reason. However, we use it as a heuristic
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
- if (timestamp_fmt != NODATE)
- print_timestamp(timestamp_fmt);
-
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact)
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
else
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
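/*
 * A minimal sketch of the fractional-interval arithmetic used above to
 * build an absolute timeout for pthread_cond_timedwait(3). The helper
 * timespec_add_interval() is hypothetical, shown only for illustration.
 */
static void
timespec_add_interval(struct timespec *ts, float interval)
{
	ts->tv_sec += (time_t)floor(interval);
	long nanos = ts->tv_nsec +
	    (long)((interval - floor(interval)) * NANOSEC);
	if (nanos >= NANOSEC) {
		/* carry the overflowing nanoseconds into tv_sec */
		ts->tv_sec++;
		ts->tv_nsec = nanos - NANOSEC;
	} else {
		ts->tv_nsec = nanos;
	}
}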
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c, i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
/* Reset activities array */
memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
for (char *tok; (tok = strsep(&optarg, ",")); ) {
static const char *const col_opts[] = {
"discard", "free", "initialize", "replace",
"remove", "resilver", "scrub", "trim" };
for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
if (strcmp(tok, col_opts[i]) == 0) {
wd.wd_enabled[i] = B_TRUE;
goto found;
}
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"), tok);
usage(B_FALSE);
found:;
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
static int
find_command_idx(const char *command, int *idx)
{
for (int i = 0; i < NCOMMAND; ++i) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
(void) argc, (void) argv;
return (zfs_version_print() != 0);
}
/* Display documentation */
static int
zpool_do_help(int argc, char **argv)
{
char page[MAXNAMELEN];
if (argc < 3 || strcmp(argv[2], "zpool") == 0)
strcpy(page, "zpool");
else if (strcmp(argv[2], "concepts") == 0 ||
strcmp(argv[2], "props") == 0)
snprintf(page, sizeof (page), "zpool%s", argv[2]);
else
snprintf(page, sizeof (page), "zpool-%s", argv[2]);
execlp("man", "man", page, NULL);
fprintf(stderr, "couldn't run man program: %s", strerror(errno));
return (-1);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, 1024);
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zpool_do_version(argc, argv));
/*
* Special case 'help'
*/
if (strcmp(cmdname, "help") == 0)
return (zpool_do_help(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index db8e631dc6be..7f5406f063e1 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -1,145 +1,148 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef ZPOOL_UTIL_H
#define ZPOOL_UTIL_H
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Path to scripts you can run with "zpool status/iostat -c" */
#define ZPOOL_SCRIPTS_DIR SYSCONFDIR"/zfs/zpool.d"
/*
* Basic utility functions
*/
void *safe_malloc(size_t);
void *safe_realloc(void *, size_t);
void zpool_no_memory(void);
uint_t num_logs(nvlist_t *nv);
uint64_t array64_max(uint64_t array[], unsigned int len);
int highbit64(uint64_t i);
int lowbit64(uint64_t i);
/*
* Misc utility functions
*/
char *zpool_get_cmd_search_path(void);
/*
* Virtual device functions
*/
nvlist_t *make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force,
int check_rep, boolean_t replacing, boolean_t dryrun, int argc,
char **argv);
nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname,
nvlist_t *props, splitflags_t flags, int argc, char **argv);
/*
* Pool list functions
*/
int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **, zfs_type_t,
boolean_t, zpool_iter_f, void *);
/* Vdev list functions */
int for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data);
typedef struct zpool_list zpool_list_t;
zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t,
boolean_t, int *);
void pool_list_update(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
void pool_list_remove(zpool_list_t *, zpool_handle_t *);
extern libzfs_handle_t *g_zfs;
typedef struct vdev_cmd_data
{
char **lines; /* Array of lines of output, minus the column name */
int lines_cnt; /* Number of lines in the array */
char **cols; /* Array of column names */
int cols_cnt; /* Number of column names */
char *path; /* vdev path */
char *upath; /* vdev underlying path */
char *pool; /* Pool name */
char *cmd; /* backpointer to cmd */
char *vdev_enc_sysfs_path; /* enclosure sysfs path (if any) */
} vdev_cmd_data_t;
typedef struct vdev_cmd_data_list
{
char *cmd; /* Command to run */
unsigned int count; /* Number of vdev_cmd_data items (vdevs) */
/* fields used to select only certain vdevs, if requested */
libzfs_handle_t *g_zfs;
char **vdev_names;
int vdev_names_count;
int cb_name_flags;
vdev_cmd_data_t *data; /* Array of vdevs */
/* List of unique column names and widths */
char **uniq_cols;
int uniq_cols_cnt;
int *uniq_cols_width;
} vdev_cmd_data_list_t;
vdev_cmd_data_list_t *all_pools_for_each_vdev_run(int argc, char **argv,
char *cmd, libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
int cb_name_flags);
void free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl);
void free_vdev_cmd_data(vdev_cmd_data_t *data);
int vdev_run_cmd_simple(char *path, char *cmd);
int check_device(const char *path, boolean_t force,
boolean_t isspare, boolean_t iswholedisk);
boolean_t check_sector_size_database(char *path, int *sector_size);
void vdev_error(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
int check_file(const char *file, boolean_t force, boolean_t isspare);
void after_zpool_upgrade(zpool_handle_t *zhp);
int check_file_generic(const char *file, boolean_t force, boolean_t isspare);
+int zpool_power(zpool_handle_t *zhp, char *vdev, boolean_t turn_on);
+int zpool_power_current_state(zpool_handle_t *zhp, char *vdev);
+
#ifdef __cplusplus
}
#endif
#endif /* ZPOOL_UTIL_H */
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 3d0fc089c32f..fbd4b81dfacc 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -1,1911 +1,1915 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2016, 2017 Intel Corporation.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
*/
/*
* Functions to convert between a list of vdevs and an nvlist representing the
* configuration. Each entry in the list can be one of:
*
* Device vdevs
* disk=(path=..., devid=...)
* file=(path=...)
*
* Group vdevs
* raidz[1|2]=(...)
* mirror=(...)
*
* Hot spares
*
* While the underlying implementation supports it, group vdevs cannot contain
* other group vdevs. All userland verification of devices is contained within
* this file. If successful, the nvlist returned can be passed directly to the
* kernel; we've done as much verification as possible in userland.
*
* Hot spares are a special case, and passed down as an array of disk vdevs, at
* the same level as the root of the vdev tree.
*
* The only function exported by this file is 'make_root_vdev'. The
* function performs several passes:
*
* 1. Construct the vdev specification. Performs syntax validation and
* makes sure each device is valid.
* 2. Check for devices in use. Uses libblkid to make sure that no
* devices are also in use. Some can be overridden using the 'force'
* flag, others cannot.
* 3. Check for replication errors if the 'force' flag is not specified.
* This validates that the replication level is consistent across the
* entire pool.
* 4. Call libzfs to label any whole disks with an EFI label.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <libzutil.h>
#include <limits.h>
#include <sys/spa.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>
#include <sys/stat.h>
/*
* For any given vdev specification, we can have multiple errors. The
* vdev_error() function keeps track of whether we have seen an error yet, and
* prints out a header if it's the first error we've seen.
*/
boolean_t error_seen;
boolean_t is_force;
void
vdev_error(const char *fmt, ...)
{
va_list ap;
if (!error_seen) {
(void) fprintf(stderr, gettext("invalid vdev specification\n"));
if (!is_force)
(void) fprintf(stderr, gettext("use '-f' to override "
"the following errors:\n"));
else
(void) fprintf(stderr, gettext("the following errors "
"must be manually repaired:\n"));
error_seen = B_TRUE;
}
va_start(ap, fmt);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
}
/*
* Check that a file is valid. All we can do in this case is check that it's
* not in use by another pool, and not in use by swap.
*/
int
check_file_generic(const char *file, boolean_t force, boolean_t isspare)
{
char *name;
int fd;
int ret = 0;
pool_state_t state;
boolean_t inuse;
if ((fd = open(file, O_RDONLY)) < 0)
return (0);
if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
const char *desc;
switch (state) {
case POOL_STATE_ACTIVE:
desc = gettext("active");
break;
case POOL_STATE_EXPORTED:
desc = gettext("exported");
break;
case POOL_STATE_POTENTIALLY_ACTIVE:
desc = gettext("potentially active");
break;
default:
desc = gettext("unknown");
break;
}
/*
* Allow hot spares to be shared between pools.
*/
if (state == POOL_STATE_SPARE && isspare) {
free(name);
(void) close(fd);
return (0);
}
if (state == POOL_STATE_ACTIVE ||
state == POOL_STATE_SPARE || !force) {
switch (state) {
case POOL_STATE_SPARE:
vdev_error(gettext("%s is reserved as a hot "
"spare for pool %s\n"), file, name);
break;
default:
vdev_error(gettext("%s is part of %s pool "
"'%s'\n"), file, desc, name);
break;
}
ret = -1;
}
free(name);
}
(void) close(fd);
return (ret);
}
/*
* This may be a shorthand device path or it could be total gibberish.
* Check to see if it is a known device available in zfs_vdev_paths.
* As part of this check, see if we've been given an entire disk
* (minus the slice number).
*/
static int
is_shorthand_path(const char *arg, char *path, size_t path_size,
struct stat64 *statbuf, boolean_t *wholedisk)
{
int error;
error = zfs_resolve_shortname(arg, path, path_size);
if (error == 0) {
*wholedisk = zfs_dev_is_whole_disk(path);
if (*wholedisk || (stat64(path, statbuf) == 0))
return (0);
}
strlcpy(path, arg, path_size);
memset(statbuf, 0, sizeof (*statbuf));
*wholedisk = B_FALSE;
return (error);
}
/*
* Determine if the given path is a hot spare within the given configuration.
* If no configuration is given we rely solely on the label.
*/
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
int fd;
pool_state_t state;
char *name = NULL;
nvlist_t *label;
uint64_t guid, spareguid;
nvlist_t *nvroot;
nvlist_t **spares;
uint_t i, nspares;
boolean_t inuse;
if (zpool_is_draid_spare(path))
return (B_TRUE);
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
!inuse ||
state != POOL_STATE_SPARE ||
zpool_read_label(fd, &label, NULL) != 0) {
free(name);
(void) close(fd);
return (B_FALSE);
}
free(name);
(void) close(fd);
if (config == NULL) {
nvlist_free(label);
return (B_TRUE);
}
verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
nvlist_free(label);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
verify(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &spareguid) == 0);
if (spareguid == guid)
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Create a leaf vdev. Determine if this is a file or a device. If it's a
* device, fill in the device id to make a complete nvlist. Valid forms for a
* leaf vdev are:
*
* /dev/xxx Complete disk path
* /xxx Full path to file
* xxx Shorthand for <zfs_vdev_paths>/xxx
* draid* Virtual dRAID spare
*/
static nvlist_t *
make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
{
char path[MAXPATHLEN];
struct stat64 statbuf;
nvlist_t *vdev = NULL;
const char *type = NULL;
boolean_t wholedisk = B_FALSE;
uint64_t ashift = 0;
int err;
/*
* Determine what type of vdev this is, and put the full path into
* 'path'. We detect whether this is a device or a file afterwards by
* checking the st_mode of the file.
*/
if (arg[0] == '/') {
/*
* Complete device or file path. Exact type is determined by
* examining the file descriptor afterwards. Symbolic links
* are resolved to their real paths to determine whole disk
* and S_ISBLK/S_ISREG type checks. However, we are careful
* to store the given path as ZPOOL_CONFIG_PATH to ensure we
* can leverage udev's persistent device labels.
*/
if (realpath(arg, path) == NULL) {
(void) fprintf(stderr,
gettext("cannot resolve path '%s'\n"), arg);
return (NULL);
}
wholedisk = zfs_dev_is_whole_disk(path);
if (!wholedisk && (stat64(path, &statbuf) != 0)) {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (NULL);
}
/* After whole disk check restore original passed path */
strlcpy(path, arg, sizeof (path));
} else if (zpool_is_draid_spare(arg)) {
if (!is_primary) {
(void) fprintf(stderr,
gettext("cannot open '%s': dRAID spares can only "
"be used to replace primary vdevs\n"), arg);
return (NULL);
}
wholedisk = B_TRUE;
strlcpy(path, arg, sizeof (path));
type = VDEV_TYPE_DRAID_SPARE;
} else {
err = is_shorthand_path(arg, path, sizeof (path),
&statbuf, &wholedisk);
if (err != 0) {
/*
* If we got ENOENT, then the user gave us
* gibberish, so try to direct them with a
* reasonable error message. Otherwise,
* regurgitate strerror() since it's the best we
* can do.
*/
if (err == ENOENT) {
(void) fprintf(stderr,
gettext("cannot open '%s': no such "
"device in %s\n"), arg, DISK_ROOT);
(void) fprintf(stderr,
gettext("must be a full path or "
"shorthand device name\n"));
return (NULL);
} else {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (NULL);
}
}
}
if (type == NULL) {
/*
* Determine whether this is a device or a file.
*/
if (wholedisk || S_ISBLK(statbuf.st_mode)) {
type = VDEV_TYPE_DISK;
} else if (S_ISREG(statbuf.st_mode)) {
type = VDEV_TYPE_FILE;
} else {
fprintf(stderr, gettext("cannot use '%s': must "
"be a block device or regular file\n"), path);
return (NULL);
}
}
/*
* Finally, we have the complete device or file, and we know that it is
* acceptable to use. Construct the nvlist to describe this vdev. All
* vdevs have a 'path' element, and devices also have a 'devid' element.
*/
verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
+ /* Lookup and add the enclosure sysfs path (if exists) */
+ update_vdev_config_dev_sysfs_path(vdev, path,
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
+
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
(uint64_t)wholedisk) == 0);
/*
* Override defaults if custom properties are provided.
*/
if (props != NULL) {
const char *value = NULL;
if (nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0) {
if (zfs_nicestrtonum(NULL, value, &ashift) != 0) {
(void) fprintf(stderr,
gettext("ashift must be a number.\n"));
return (NULL);
}
if (ashift != 0 &&
(ashift < ASHIFT_MIN || ashift > ASHIFT_MAX)) {
(void) fprintf(stderr,
gettext("invalid 'ashift=%" PRIu64 "' "
"property: only values between %" PRId32 " "
"and %" PRId32 " are allowed.\n"),
ashift, ASHIFT_MIN, ASHIFT_MAX);
return (NULL);
}
}
}
/*
* If the device is known to incorrectly report its physical sector
* size explicitly provide the known correct value.
*/
if (ashift == 0) {
int sector_size;
if (check_sector_size_database(path, &sector_size) == B_TRUE)
ashift = highbit64(sector_size) - 1;
}
if (ashift > 0)
(void) nvlist_add_uint64(vdev, ZPOOL_CONFIG_ASHIFT, ashift);
return (vdev);
}
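/*
 * A minimal sketch of the nvlist shape make_leaf_vdev() produces for a
 * plain disk vdev. build_disk_vdev() is hypothetical and omits the path
 * resolution, ashift, and enclosure-path handling shown above.
 */
static nvlist_t *
build_disk_vdev(const char *path, boolean_t wholedisk)
{
	nvlist_t *vdev;

	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
	    (uint64_t)wholedisk) == 0);
	return (vdev);
}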
/*
* Go through and verify the replication level of the pool is consistent.
* Performs the following checks:
*
* For the new spec, verifies that devices in mirrors and raidz are the
* same size.
*
* If the current configuration already has inconsistent replication
* levels, ignore any other potential problems in the new spec.
*
* Otherwise, make sure that the current spec (if there is one) and the new
* spec have consistent replication levels.
*
* If there is no current spec (create), make sure new spec has at least
* one general purpose vdev.
*/
typedef struct replication_level {
const char *zprl_type;
uint64_t zprl_children;
uint64_t zprl_parity;
} replication_level_t;
#define ZPOOL_FUZZ (16 * 1024 * 1024)
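/*
 * A minimal sketch of how a size tolerance like ZPOOL_FUZZ can be applied
 * when comparing device sizes within a mirror or raidz group.
 * sizes_roughly_equal() is hypothetical, shown only for illustration.
 */
static boolean_t
sizes_roughly_equal(int64_t a, int64_t b)
{
	int64_t diff = (a > b) ? a - b : b - a;

	return (diff <= ZPOOL_FUZZ ? B_TRUE : B_FALSE);
}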
/*
* N.B. For the purposes of comparing replication levels dRAID can be
* considered functionally equivalent to raidz.
*/
static boolean_t
is_raidz_mirror(replication_level_t *a, replication_level_t *b,
replication_level_t **raidz, replication_level_t **mirror)
{
if ((strcmp(a->zprl_type, "raidz") == 0 ||
strcmp(a->zprl_type, "draid") == 0) &&
strcmp(b->zprl_type, "mirror") == 0) {
*raidz = a;
*mirror = b;
return (B_TRUE);
}
return (B_FALSE);
}
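/*
 * Example: a raidz2 top-level vdev and a 3-way mirror are treated as
 * equivalent replication levels, since both survive the loss of any two
 * devices (raidz zprl_parity == 2 == mirror zprl_children - 1).
 */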
/*
* Comparison for determining if dRAID and raidz were passed in either order.
*/
static boolean_t
is_raidz_draid(replication_level_t *a, replication_level_t *b)
{
if ((strcmp(a->zprl_type, "raidz") == 0 ||
strcmp(a->zprl_type, "draid") == 0) &&
(strcmp(b->zprl_type, "raidz") == 0 ||
strcmp(b->zprl_type, "draid") == 0)) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Given a list of toplevel vdevs, return the current replication level. If
* the config is inconsistent, then NULL is returned. If 'fatal' is set, then
* an error message will be displayed for each self-inconsistent vdev.
*/
static replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
nvlist_t **top;
uint_t t, toplevels;
nvlist_t **child;
uint_t c, children;
nvlist_t *nv;
const char *type;
replication_level_t lastrep = {0};
replication_level_t rep;
replication_level_t *ret;
replication_level_t *raidz, *mirror;
boolean_t dontreport;
ret = safe_malloc(sizeof (replication_level_t));
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&top, &toplevels) == 0);
for (t = 0; t < toplevels; t++) {
uint64_t is_log = B_FALSE;
nv = top[t];
/*
* For separate logs we ignore the top level vdev replication
* constraints.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
if (is_log)
continue;
/*
* Ignore holes introduced by removing aux devices, along
* with indirect vdevs introduced by previously removed
* vdevs.
*/
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_HOLE) == 0 ||
strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
/*
* This is a 'file' or 'disk' vdev.
*/
rep.zprl_type = type;
rep.zprl_children = 1;
rep.zprl_parity = 0;
} else {
int64_t vdev_size;
/*
* This is a mirror or RAID-Z vdev. Go through and make
* sure the contents are all the same (files vs. disks),
* keeping track of the number of elements in the
* process.
*
* We also check that the size of each vdev (if it can
* be determined) is the same.
*/
rep.zprl_type = type;
rep.zprl_children = 0;
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
strcmp(type, VDEV_TYPE_DRAID) == 0) {
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY,
&rep.zprl_parity) == 0);
assert(rep.zprl_parity != 0);
} else {
rep.zprl_parity = 0;
}
/*
* The 'dontreport' variable indicates that we've
* already reported an error for this spec, so don't
* bother doing it again.
*/
type = NULL;
dontreport = 0;
vdev_size = -1LL;
for (c = 0; c < children; c++) {
nvlist_t *cnv = child[c];
const char *path;
struct stat64 statbuf;
int64_t size = -1LL;
const char *childtype;
int fd, err;
rep.zprl_children++;
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_TYPE, &childtype) == 0);
/*
* If this is a replacing or spare vdev, then
* get the real first child of the vdev: do this
* in a loop because replacing and spare vdevs
* can be nested.
*/
while (strcmp(childtype,
VDEV_TYPE_REPLACING) == 0 ||
strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
nvlist_t **rchild;
uint_t rchildren;
verify(nvlist_lookup_nvlist_array(cnv,
ZPOOL_CONFIG_CHILDREN, &rchild,
&rchildren) == 0);
assert(rchildren == 2);
cnv = rchild[0];
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_TYPE,
&childtype) == 0);
}
verify(nvlist_lookup_string(cnv,
ZPOOL_CONFIG_PATH, &path) == 0);
/*
* If we have a raidz/mirror that combines disks
* with files, report it as an error.
*/
if (!dontreport && type != NULL &&
strcmp(type, childtype) != 0) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: %s contains both "
"files and devices\n"),
rep.zprl_type);
else
return (NULL);
dontreport = B_TRUE;
}
/*
* According to stat(2), the value of 'st_size'
* is undefined for block devices and character
* devices. But there is no effective way to
* determine the real size in userland.
*
* Instead, we'll take advantage of an
* implementation detail of spec_size(). If the
* device is currently open, then we (should)
* return a valid size.
*
* If we still don't get a valid size (indicated
* by a size of 0 or MAXOFFSET_T), then ignore
* this device altogether.
*/
if ((fd = open(path, O_RDONLY)) >= 0) {
err = fstat64_blk(fd, &statbuf);
(void) close(fd);
} else {
err = stat64(path, &statbuf);
}
if (err != 0 ||
statbuf.st_size == 0 ||
statbuf.st_size == MAXOFFSET_T)
continue;
size = statbuf.st_size;
/*
* Also make sure that devices and
* slices have a consistent size. If
* they differ by a significant amount
* (~16MB) then report an error.
*/
if (!dontreport &&
(vdev_size != -1LL &&
(llabs(size - vdev_size) >
ZPOOL_FUZZ))) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"%s contains devices of "
"different sizes\n"),
rep.zprl_type);
else
return (NULL);
dontreport = B_TRUE;
}
type = childtype;
vdev_size = size;
}
}
/*
* At this point, we have the replication of the last toplevel
* vdev in 'rep'. Compare it to 'lastrep' to see if it is
* different.
*/
if (lastrep.zprl_type != NULL) {
if (is_raidz_mirror(&lastrep, &rep, &raidz, &mirror) ||
is_raidz_mirror(&rep, &lastrep, &raidz, &mirror)) {
/*
* Accept raidz and mirror when they can
* handle the same number of disk failures.
*/
if (raidz->zprl_parity !=
mirror->zprl_children - 1) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: "
"%s and %s vdevs with "
"different redundancy, "
"%llu vs. %llu (%llu-way) "
"are present\n"),
raidz->zprl_type,
mirror->zprl_type,
(u_longlong_t)
raidz->zprl_parity,
(u_longlong_t)
mirror->zprl_children - 1,
(u_longlong_t)
mirror->zprl_children);
else
return (NULL);
}
} else if (is_raidz_draid(&lastrep, &rep)) {
/*
* Accept raidz and draid when they can
* handle the same number of disk failures.
*/
if (lastrep.zprl_parity != rep.zprl_parity) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication "
"level: %s and %s vdevs "
"with different "
"redundancy, %llu vs. "
"%llu are present\n"),
lastrep.zprl_type,
rep.zprl_type,
(u_longlong_t)
lastrep.zprl_parity,
(u_longlong_t)
rep.zprl_parity);
else
return (NULL);
}
} else if (strcmp(lastrep.zprl_type, rep.zprl_type) !=
0) {
if (ret != NULL)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %s and %s vdevs are "
"present\n"),
lastrep.zprl_type, rep.zprl_type);
else
return (NULL);
} else if (lastrep.zprl_parity != rep.zprl_parity) {
if (ret)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %llu and %llu device parity "
"%s vdevs are present\n"),
(u_longlong_t)
lastrep.zprl_parity,
(u_longlong_t)rep.zprl_parity,
rep.zprl_type);
else
return (NULL);
} else if (lastrep.zprl_children != rep.zprl_children) {
if (ret)
free(ret);
ret = NULL;
if (fatal)
vdev_error(gettext(
"mismatched replication level: "
"both %llu-way and %llu-way %s "
"vdevs are present\n"),
(u_longlong_t)
lastrep.zprl_children,
(u_longlong_t)
rep.zprl_children,
rep.zprl_type);
else
return (NULL);
}
}
lastrep = rep;
}
if (ret != NULL)
*ret = rep;
return (ret);
}
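/*
 * Illustrative result (sketch): for a pool built from two 6-disk raidz2
 * top-level vdevs, get_replication() returns
 *
 *	{ .zprl_type = "raidz", .zprl_children = 6, .zprl_parity = 2 }
 *
 * since both top-level vdevs agree on type, width, and parity.
 */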
/*
* Check the replication level of the vdev spec against the current pool. Calls
* get_replication() to make sure the new spec is self-consistent. If the
* current pool configuration is already inconsistent, ignore any errors in
* the new spec. Otherwise,
* report any difference between the two.
*/
static int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
nvlist_t **child;
uint_t children;
replication_level_t *current = NULL, *new;
replication_level_t *raidz, *mirror;
int ret;
/*
* If we have a current pool configuration, check to see if it's
* self-consistent. If not, simply return success.
*/
if (config != NULL) {
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if ((current = get_replication(nvroot, B_FALSE)) == NULL)
return (0);
}
/*
* for spares there may be no children, and therefore no
* replication level to check
*/
if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) || (children == 0)) {
free(current);
return (0);
}
/*
* If all we have is logs then there's no replication level to check.
*/
if (num_logs(newroot) == children) {
free(current);
return (0);
}
/*
* Get the replication level of the new vdev spec, reporting any
* inconsistencies found.
*/
if ((new = get_replication(newroot, B_TRUE)) == NULL) {
free(current);
return (-1);
}
/*
* Check to see if the new vdev spec matches the replication level of
* the current pool.
*/
ret = 0;
if (current != NULL) {
if (is_raidz_mirror(current, new, &raidz, &mirror) ||
is_raidz_mirror(new, current, &raidz, &mirror)) {
if (raidz->zprl_parity != mirror->zprl_children - 1) {
vdev_error(gettext(
"mismatched replication level: pool and "
"new vdev with different redundancy, %s "
"and %s vdevs, %llu vs. %llu (%llu-way)\n"),
raidz->zprl_type,
mirror->zprl_type,
(u_longlong_t)raidz->zprl_parity,
(u_longlong_t)mirror->zprl_children - 1,
(u_longlong_t)mirror->zprl_children);
ret = -1;
}
} else if (strcmp(current->zprl_type, new->zprl_type) != 0) {
vdev_error(gettext(
"mismatched replication level: pool uses %s "
"and new vdev is %s\n"),
current->zprl_type, new->zprl_type);
ret = -1;
} else if (current->zprl_parity != new->zprl_parity) {
vdev_error(gettext(
"mismatched replication level: pool uses %llu "
"device parity and new vdev uses %llu\n"),
(u_longlong_t)current->zprl_parity,
(u_longlong_t)new->zprl_parity);
ret = -1;
} else if (current->zprl_children != new->zprl_children) {
vdev_error(gettext(
"mismatched replication level: pool uses %llu-way "
"%s and new vdev uses %llu-way %s\n"),
(u_longlong_t)current->zprl_children,
current->zprl_type,
(u_longlong_t)new->zprl_children,
new->zprl_type);
ret = -1;
}
}
free(new);
if (current != NULL)
free(current);
return (ret);
}
static int
zero_label(const char *path)
{
const int size = 4096;
char buf[size];
int err, fd;
if ((fd = open(path, O_WRONLY|O_EXCL)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
path, strerror(errno));
return (-1);
}
memset(buf, 0, size);
err = write(fd, buf, size);
(void) fdatasync(fd);
(void) close(fd);
if (err == -1) {
(void) fprintf(stderr, gettext("cannot zero first %d bytes "
"of '%s': %s\n"), size, path, strerror(errno));
return (-1);
}
if (err != size) {
(void) fprintf(stderr, gettext("could only zero %d/%d bytes "
"of '%s'\n"), err, size, path);
return (-1);
}
return (0);
}
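/*
 * zero_label() is used by make_disks() below to clear the first 4k of a
 * partition so that libblkid does not misidentify it based on a stale
 * magic value left by a previous filesystem.
 */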
static void
lines_to_stderr(char *lines[], int lines_cnt)
{
int i;
for (i = 0; i < lines_cnt; i++) {
fprintf(stderr, "%s\n", lines[i]);
}
}
/*
* Go through and find any whole disks in the vdev specification, labelling them
* as appropriate. When constructing the vdev spec, we were unable to open this
* device in order to provide a devid. Now that we have labelled the disk and
* know that slice 0 is valid, we can now construct the devid.
*
* If the disk was already labeled with an EFI label, we will have gotten the
* devid already (because we were able to open the whole disk). Otherwise, we
* need to get the devid after we label the disk.
*/
static int
make_disks(zpool_handle_t *zhp, nvlist_t *nv, boolean_t replacing)
{
nvlist_t **child;
uint_t c, children;
const char *type, *path;
char devpath[MAXPATHLEN];
char udevpath[MAXPATHLEN];
uint64_t wholedisk;
struct stat64 statbuf;
int is_exclusive = 0;
int fd;
int ret;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (strcmp(type, VDEV_TYPE_DISK) != 0)
return (0);
/*
* We have a disk device. If this is a whole disk, write
* out the EFI partition table; otherwise write zeros to
* the first 4k of the partition. This is to ensure that
* libblkid will not misidentify the partition due to a
* magic value left by the previous filesystem.
*/
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
verify(!nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk));
if (!wholedisk) {
/*
* Update device id string for mpath nodes (Linux only)
*/
if (is_mpath_whole_disk(path))
update_vdev_config_dev_strs(nv);
if (!is_spare(NULL, path))
(void) zero_label(path);
return (0);
}
if (realpath(path, devpath) == NULL) {
ret = errno;
(void) fprintf(stderr,
gettext("cannot resolve path '%s'\n"), path);
return (ret);
}
/*
* Remove any previously existing symlink from a udev path to
* the device before labeling the disk. This ensures that
* only newly created links are used. Otherwise there is a
* window between when udev deletes and recreates the link
* during which access attempts will fail with ENOENT.
*/
strlcpy(udevpath, path, MAXPATHLEN);
(void) zfs_append_partition(udevpath, MAXPATHLEN);
fd = open(devpath, O_RDWR|O_EXCL);
if (fd == -1) {
if (errno == EBUSY)
is_exclusive = 1;
#ifdef __FreeBSD__
if (errno == EPERM)
is_exclusive = 1;
#endif
} else {
(void) close(fd);
}
/*
* If the partition exists, contains a valid spare label,
* and is opened exclusively there is no need to partition
* it. Hot spares have already been partitioned and are
* held open exclusively by the kernel as a safety measure.
*
* If the provided path is for a /dev/disk/ device its
* symbolic link will be removed, partition table created,
* and then block until udev creates the new link.
*/
if (!is_exclusive && !is_spare(NULL, udevpath)) {
char *devnode = strrchr(devpath, '/') + 1;
char **lines = NULL;
int lines_cnt = 0;
ret = strncmp(udevpath, UDISK_ROOT, strlen(UDISK_ROOT));
if (ret == 0) {
ret = lstat64(udevpath, &statbuf);
if (ret == 0 && S_ISLNK(statbuf.st_mode))
(void) unlink(udevpath);
}
/*
* When labeling a pool, the raw device node name
* is provided as it appears under /dev/.
*
* Note that 'zhp' will be NULL when we're creating a
* pool.
*/
if (zpool_prepare_and_label_disk(g_zfs, zhp, devnode,
nv, zhp == NULL ? "create" :
replacing ? "replace" : "add", &lines,
&lines_cnt) != 0) {
(void) fprintf(stderr,
gettext(
"Error preparing/labeling disk.\n"));
if (lines_cnt > 0) {
(void) fprintf(stderr,
gettext("zfs_prepare_disk output:\n"));
lines_to_stderr(lines, lines_cnt);
}
libzfs_free_str_array(lines, lines_cnt);
return (-1);
}
libzfs_free_str_array(lines, lines_cnt);
/*
* Wait for udev to signal the device is available
* by the provided path.
*/
ret = zpool_label_disk_wait(udevpath, DISK_LABEL_WAIT);
if (ret) {
(void) fprintf(stderr,
gettext("missing link: %s was "
"partitioned but %s is missing\n"),
devnode, udevpath);
return (ret);
}
ret = zero_label(udevpath);
if (ret)
return (ret);
}
/*
* Update the path to refer to the partition. The presence of
* the 'whole_disk' field indicates to the CLI that we should
* chop off the partition number when displaying the device in
* future output.
*/
verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, udevpath) == 0);
/*
* Update device id strings for whole disks (Linux only)
*/
update_vdev_config_dev_strs(nv);
return (0);
}
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0)
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0)
for (c = 0; c < children; c++)
if ((ret = make_disks(zhp, child[c], replacing)) != 0)
return (ret);
return (0);
}
/*
* Go through and find any devices that are in use. We rely on libdiskmgt for
* the majority of this task.
*/
static boolean_t
is_device_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
boolean_t replacing, boolean_t isspare)
{
nvlist_t **child;
uint_t c, children;
const char *type, *path;
int ret = 0;
char buf[MAXPATHLEN];
uint64_t wholedisk = B_FALSE;
boolean_t anyinuse = B_FALSE;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(!nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_WHOLE_DISK, &wholedisk));
/*
* As a generic check, we look to see if this is a replace of a
* hot spare within the same pool. If so, we allow it
* regardless of what libblkid or zpool_in_use() says.
*/
if (replacing) {
(void) strlcpy(buf, path, sizeof (buf));
if (wholedisk) {
ret = zfs_append_partition(buf, sizeof (buf));
if (ret == -1)
return (-1);
}
if (is_spare(config, buf))
return (B_FALSE);
}
if (strcmp(type, VDEV_TYPE_DISK) == 0)
ret = check_device(path, force, isspare, wholedisk);
else if (strcmp(type, VDEV_TYPE_FILE) == 0)
ret = check_file(path, force, isspare);
return (ret != 0);
}
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_FALSE))
anyinuse = B_TRUE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0)
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_TRUE))
anyinuse = B_TRUE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0)
for (c = 0; c < children; c++)
if (is_device_in_use(config, child[c], force, replacing,
B_FALSE))
anyinuse = B_TRUE;
return (anyinuse);
}
/*
* Returns the parity level extracted from a raidz or draid type.
* If the parity cannot be determined, zero is returned.
*/
static int
get_parity(const char *type)
{
long parity = 0;
const char *p;
if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0) {
p = type + strlen(VDEV_TYPE_RAIDZ);
if (*p == '\0') {
/* when unspecified default to single parity */
return (1);
} else if (*p == '0') {
/* no zero prefixes allowed */
return (0);
} else {
/* 0-3, no suffixes allowed */
char *end;
errno = 0;
parity = strtol(p, &end, 10);
if (errno != 0 || *end != '\0' ||
parity < 1 || parity > VDEV_RAIDZ_MAXPARITY) {
return (0);
}
}
} else if (strncmp(type, VDEV_TYPE_DRAID,
strlen(VDEV_TYPE_DRAID)) == 0) {
p = type + strlen(VDEV_TYPE_DRAID);
if (*p == '\0' || *p == ':') {
/* when unspecified default to single parity */
return (1);
} else if (*p == '0') {
/* no zero prefixes allowed */
return (0);
} else {
/* 0-3, allowed suffixes: '\0' or ':' */
char *end;
errno = 0;
parity = strtol(p, &end, 10);
if (errno != 0 ||
parity < 1 || parity > VDEV_DRAID_MAXPARITY ||
(*end != '\0' && *end != ':')) {
return (0);
}
}
}
return ((int)parity);
}
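/*
 * A few concrete parses (sketch):
 *
 *	get_parity("raidz")     == 1	(unspecified, default single parity)
 *	get_parity("raidz2")    == 2
 *	get_parity("raidz03")   == 0	(zero prefix rejected)
 *	get_parity("draid")     == 1
 *	get_parity("draid3:8d") == 3	(':' suffix allowed for dRAID)
 */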
/*
* Assign the minimum and maximum number of devices allowed for
* the specified type. On error NULL is returned, otherwise the
* type prefix is returned (raidz, mirror, etc).
*/
static const char *
is_grouping(const char *type, int *mindev, int *maxdev)
{
int nparity;
if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0) {
nparity = get_parity(type);
if (nparity == 0)
return (NULL);
if (mindev != NULL)
*mindev = nparity + 1;
if (maxdev != NULL)
*maxdev = 255;
if (strncmp(type, VDEV_TYPE_RAIDZ,
strlen(VDEV_TYPE_RAIDZ)) == 0) {
return (VDEV_TYPE_RAIDZ);
} else {
return (VDEV_TYPE_DRAID);
}
}
if (maxdev != NULL)
*maxdev = INT_MAX;
if (strcmp(type, "mirror") == 0) {
if (mindev != NULL)
*mindev = 2;
return (VDEV_TYPE_MIRROR);
}
if (strcmp(type, "spare") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_SPARE);
}
if (strcmp(type, "log") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_LOG);
}
if (strcmp(type, VDEV_ALLOC_BIAS_SPECIAL) == 0 ||
strcmp(type, VDEV_ALLOC_BIAS_DEDUP) == 0) {
if (mindev != NULL)
*mindev = 1;
return (type);
}
if (strcmp(type, "cache") == 0) {
if (mindev != NULL)
*mindev = 1;
return (VDEV_TYPE_L2CACHE);
}
return (NULL);
}
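/*
 * For reference (sketch), the mapping implemented above:
 *
 *	"mirror" -> VDEV_TYPE_MIRROR, mindev = 2, maxdev = INT_MAX
 *	"raidz2" -> VDEV_TYPE_RAIDZ,  mindev = 3, maxdev = 255
 *	"draid1" -> VDEV_TYPE_DRAID,  mindev = 2, maxdev = 255
 *	"spare", "log", "cache", "special", "dedup" -> mindev = 1
 *	anything else -> NULL, i.e. treated as a leaf device name
 */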
/*
* Extract the configuration parameters encoded in the dRAID type and
* use them to generate a dRAID configuration. The expected format is:
*
* draid[<parity>][:<data><d|D>][:<children><c|C>][:<spares><s|S>]
*
* The intent is to be able to generate a good configuration when no
* additional information is provided. The only mandatory component
* of the 'type' is the 'draid' prefix. If a value is not provided
* then reasonable defaults are used. The optional components may
* appear in any order but the d/s/c suffix is required.
*
* Valid inputs:
* - data: number of data devices per group (1-255)
* - parity: number of parity blocks per group (1-3)
* - spares: number of distributed spares (0-100)
* - children: total number of devices (1-255)
*
* Examples:
* - zpool create tank draid <devices...>
* - zpool create tank draid2:8d:51c:2s <devices...>
*/
static int
draid_config_by_type(nvlist_t *nv, const char *type, uint64_t children)
{
uint64_t nparity = 1;
uint64_t nspares = 0;
uint64_t ndata = UINT64_MAX;
uint64_t ngroups = 1;
long value;
if (strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) != 0)
return (EINVAL);
nparity = (uint64_t)get_parity(type);
if (nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
fprintf(stderr,
gettext("invalid dRAID parity level %llu; must be "
"between 1 and %d\n"), (u_longlong_t)nparity,
VDEV_DRAID_MAXPARITY);
return (EINVAL);
}
char *p = (char *)type;
while ((p = strchr(p, ':')) != NULL) {
char *end;
p = p + 1;
errno = 0;
if (!isdigit(p[0])) {
(void) fprintf(stderr, gettext("invalid dRAID "
"syntax; expected [:<number><c|d|s>] not '%s'\n"),
type);
return (EINVAL);
}
/* Expected non-zero value with c/d/s suffix */
value = strtol(p, &end, 10);
char suffix = tolower(*end);
if (errno != 0 ||
(suffix != 'c' && suffix != 'd' && suffix != 's')) {
(void) fprintf(stderr, gettext("invalid dRAID "
"syntax; expected [:<number><c|d|s>] not '%s'\n"),
type);
return (EINVAL);
}
if (suffix == 'c') {
if ((uint64_t)value != children) {
fprintf(stderr,
gettext("invalid number of dRAID children; "
"%llu required but %llu provided\n"),
(u_longlong_t)value,
(u_longlong_t)children);
return (EINVAL);
}
} else if (suffix == 'd') {
ndata = (uint64_t)value;
} else if (suffix == 's') {
nspares = (uint64_t)value;
} else {
verify(0); /* Unreachable */
}
}
/*
* When a specific number of data disks is not provided, limit a
* redundancy group to 8 data disks. This value was selected to
* provide a reasonable tradeoff between capacity and performance.
*/
if (ndata == UINT64_MAX) {
if (children > nspares + nparity) {
ndata = MIN(children - nspares - nparity, 8);
} else {
fprintf(stderr, gettext("request number of "
"distributed spares %llu and parity level %llu\n"
"leaves no disks available for data\n"),
(u_longlong_t)nspares, (u_longlong_t)nparity);
return (EINVAL);
}
}
/* Verify the maximum allowed group size is never exceeded. */
if (ndata == 0 || (ndata + nparity > children - nspares)) {
fprintf(stderr, gettext("requested number of dRAID data "
"disks per group %llu is too high,\nat most %llu disks "
"are available for data\n"), (u_longlong_t)ndata,
(u_longlong_t)(children - nspares - nparity));
return (EINVAL);
}
/*
* Verify the requested number of spares can be satisfied.
* An arbitrary limit of 100 distributed spares is applied.
*/
if (nspares > 100 || nspares > (children - (ndata + nparity))) {
fprintf(stderr,
gettext("invalid number of dRAID spares %llu; additional "
"disks would be required\n"), (u_longlong_t)nspares);
return (EINVAL);
}
/* Verify the requested number of children is sufficient. */
if (children < (ndata + nparity + nspares)) {
fprintf(stderr, gettext("%llu disks were provided, but at "
"least %llu disks are required for this config\n"),
(u_longlong_t)children,
(u_longlong_t)(ndata + nparity + nspares));
return (EINVAL);
}
if (children > VDEV_DRAID_MAX_CHILDREN) {
fprintf(stderr, gettext("%llu disks were provided, but "
"dRAID only supports up to %u disks\n"),
(u_longlong_t)children, VDEV_DRAID_MAX_CHILDREN);
return (EINVAL);
}
/*
* Calculate the minimum number of groups required to fill a slice.
* This is the LCM of the stripe width (ndata + nparity) and the
* number of data drives (children - nspares).
*/
while (ngroups * (ndata + nparity) % (children - nspares) != 0)
ngroups++;
/* Store the basic dRAID configuration. */
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, nparity);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, ndata);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
return (0);
}
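/*
 * Worked example (sketch) for "draid2:8d:51c:2s" with children == 51:
 *
 *	nparity = 2, ndata = 8, nspares = 2
 *	stripe width = ndata + nparity    = 10
 *	data drives  = children - nspares = 49
 *	ngroups = lcm(10, 49) / 10        = 49	(49 * 10 == 490)
 */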
/*
* Construct a syntactically valid vdev specification,
* and ensure that all devices and files exist and can be opened.
* Note: intermediate child and top-level nvlists are released through the
* 'spec_out' path, on both success and failure.
*/
static nvlist_t *
construct_spec(nvlist_t *props, int argc, char **argv)
{
nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
const char *type, *fulltype;
boolean_t is_log, is_special, is_dedup, is_spare;
boolean_t seen_logs;
top = NULL;
toplevels = 0;
spares = NULL;
l2cache = NULL;
nspares = 0;
nlogs = 0;
nl2cache = 0;
is_log = is_special = is_dedup = is_spare = B_FALSE;
seen_logs = B_FALSE;
nvroot = NULL;
while (argc > 0) {
fulltype = argv[0];
nv = NULL;
/*
* If it's a mirror, raidz, or draid the subsequent arguments
* are its leaves -- until we encounter the next mirror,
* raidz or draid.
*/
if ((type = is_grouping(fulltype, &mindev, &maxdev)) != NULL) {
nvlist_t **child = NULL;
int c, children = 0;
if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
if (spares != NULL) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'spare' can be "
"specified only once\n"));
goto spec_out;
}
is_spare = B_TRUE;
is_log = is_special = is_dedup = B_FALSE;
}
if (strcmp(type, VDEV_TYPE_LOG) == 0) {
if (seen_logs) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'log' can be "
"specified only once\n"));
goto spec_out;
}
seen_logs = B_TRUE;
is_log = B_TRUE;
is_special = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
/*
* A log is not a real grouping device.
* We just set is_log and continue.
*/
continue;
}
if (strcmp(type, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
is_special = B_TRUE;
is_log = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
continue;
}
if (strcmp(type, VDEV_ALLOC_BIAS_DEDUP) == 0) {
is_dedup = B_TRUE;
is_log = is_special = is_spare = B_FALSE;
argc--;
argv++;
continue;
}
if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
if (l2cache != NULL) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: 'cache' can be "
"specified only once\n"));
goto spec_out;
}
is_log = is_special = B_FALSE;
is_dedup = is_spare = B_FALSE;
}
if (is_log || is_special || is_dedup) {
if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
(void) fprintf(stderr,
gettext("invalid vdev "
"specification: unsupported '%s' "
"device: %s\n"), is_log ? "log" :
"special", type);
goto spec_out;
}
nlogs++;
}
for (c = 1; c < argc; c++) {
if (is_grouping(argv[c], NULL, NULL) != NULL)
break;
children++;
child = realloc(child,
children * sizeof (nvlist_t *));
if (child == NULL)
zpool_no_memory();
if ((nv = make_leaf_vdev(props, argv[c],
!(is_log || is_special || is_dedup ||
is_spare))) == NULL) {
for (c = 0; c < children - 1; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
child[children - 1] = nv;
}
if (children < mindev) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: %s requires at least %d "
"devices\n"), argv[0], mindev);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
if (children > maxdev) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: %s supports no more than "
"%d devices\n"), argv[0], maxdev);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
argc -= c;
argv += c;
if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
spares = child;
nspares = children;
continue;
} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
l2cache = child;
nl2cache = children;
continue;
} else {
/* create a top-level vdev with children */
verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
0) == 0);
verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
type) == 0);
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_IS_LOG, is_log) == 0);
if (is_log) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_LOG) == 0);
}
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) == 0);
}
if (is_dedup) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) == 0);
}
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_NPARITY,
mindev - 1) == 0);
}
if (strcmp(type, VDEV_TYPE_DRAID) == 0) {
if (draid_config_by_type(nv,
fulltype, children) != 0) {
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
goto spec_out;
}
}
verify(nvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, children) == 0);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
}
} else {
/*
* We have a device. Pass off to make_leaf_vdev() to
* construct the appropriate nvlist describing the vdev.
*/
if ((nv = make_leaf_vdev(props, argv[0], !(is_log ||
is_special || is_dedup || is_spare))) == NULL)
goto spec_out;
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_IS_LOG, is_log) == 0);
if (is_log) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_LOG) == 0);
nlogs++;
}
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) == 0);
}
if (is_dedup) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) == 0);
}
argc--;
argv++;
}
toplevels++;
top = realloc(top, toplevels * sizeof (nvlist_t *));
if (top == NULL)
zpool_no_memory();
top[toplevels - 1] = nv;
}
if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto spec_out;
}
if (seen_logs && nlogs == 0) {
(void) fprintf(stderr, gettext("invalid vdev specification: "
"log requires at least 1 device\n"));
goto spec_out;
}
/*
* Finally, create nvroot and add all top-level vdevs to it.
*/
verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) == 0);
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)top, toplevels) == 0);
if (nspares != 0)
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t **)spares, nspares) == 0);
if (nl2cache != 0)
verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t **)l2cache, nl2cache) == 0);
spec_out:
for (t = 0; t < toplevels; t++)
nvlist_free(top[t]);
for (t = 0; t < nspares; t++)
nvlist_free(spares[t]);
for (t = 0; t < nl2cache; t++)
nvlist_free(l2cache[t]);
free(spares);
free(l2cache);
free(top);
return (nvroot);
}
nvlist_t *
split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
splitflags_t flags, int argc, char **argv)
{
nvlist_t *newroot = NULL, **child;
uint_t c, children;
if (argc > 0) {
if ((newroot = construct_spec(props, argc, argv)) == NULL) {
(void) fprintf(stderr, gettext("Unable to build a "
"pool from the specified devices\n"));
return (NULL);
}
if (!flags.dryrun && make_disks(zhp, newroot, B_FALSE) != 0) {
nvlist_free(newroot);
return (NULL);
}
/* avoid any tricks in the spec */
verify(nvlist_lookup_nvlist_array(newroot,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
for (c = 0; c < children; c++) {
const char *path;
const char *type;
int min, max;
verify(nvlist_lookup_string(child[c],
ZPOOL_CONFIG_PATH, &path) == 0);
if ((type = is_grouping(path, &min, &max)) != NULL) {
(void) fprintf(stderr, gettext("Cannot use "
"'%s' as a device for splitting\n"), type);
nvlist_free(newroot);
return (NULL);
}
}
}
if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
nvlist_free(newroot);
return (NULL);
}
return (newroot);
}
static int
num_normal_vdevs(nvlist_t *nvroot)
{
nvlist_t **top;
uint_t t, toplevels, normal = 0;
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&top, &toplevels) == 0);
for (t = 0; t < toplevels; t++) {
uint64_t log = B_FALSE;
(void) nvlist_lookup_uint64(top[t], ZPOOL_CONFIG_IS_LOG, &log);
if (log)
continue;
if (nvlist_exists(top[t], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
normal++;
}
return (normal);
}
/*
* Get and validate the contents of the given vdev specification. This ensures
* that the nvlist returned is well-formed, that all the devices exist, and that
* they are not currently in use by any other known consumer. The 'poolconfig'
* parameter is the current configuration of the pool when adding devices to an
* existing pool, and is used to perform additional checks, such as changing the
* replication level of the pool. It can be 'NULL' to indicate that this is a
* new pool. The 'force' flag controls whether devices should be forcefully
* added, even if they appear in use.
*/
nvlist_t *
make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep,
boolean_t replacing, boolean_t dryrun, int argc, char **argv)
{
nvlist_t *newroot;
nvlist_t *poolconfig = NULL;
is_force = force;
/*
* Construct the vdev specification. If this is successful, we know
* that we have a valid specification, and that all devices can be
* opened.
*/
if ((newroot = construct_spec(props, argc, argv)) == NULL)
return (NULL);
if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL)) {
nvlist_free(newroot);
return (NULL);
}
/*
* Validate each device to make sure that it's not shared with another
* subsystem. We do this even if 'force' is set, because there are some
* uses (such as a dedicated dump device) that even '-f' cannot
* override.
*/
if (is_device_in_use(poolconfig, newroot, force, replacing, B_FALSE)) {
nvlist_free(newroot);
return (NULL);
}
/*
* Check the replication level of the given vdevs and report any errors
* found. We include the existing pool spec, if any, as we need to
* catch changes against the existing replication level.
*/
if (check_rep && check_replication(poolconfig, newroot) != 0) {
nvlist_free(newroot);
return (NULL);
}
/*
* On pool create the new vdev spec must have at least one normal vdev.
*/
if (poolconfig == NULL && num_normal_vdevs(newroot) == 0) {
vdev_error(gettext("at least one general top-level vdev must "
"be specified\n"));
nvlist_free(newroot);
return (NULL);
}
/*
* Run through the vdev specification and label any whole disks found.
*/
if (!dryrun && make_disks(zhp, newroot, replacing) != 0) {
nvlist_free(newroot);
return (NULL);
}
return (newroot);
}
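/*
 * Typical call shape (an illustrative sketch only, not lifted from the
 * callers): 'zpool create' builds a brand new root with no pool handle,
 * e.g.
 *
 *	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE,
 *	    dryrun, argc, argv);
 *
 * while 'zpool add'/'zpool replace' pass an open handle so the existing
 * configuration is checked for replication-level changes.
 */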
diff --git a/sys/contrib/openzfs/config/Rules.am b/sys/contrib/openzfs/config/Rules.am
index 7c266964f3f3..2e463ae6083a 100644
--- a/sys/contrib/openzfs/config/Rules.am
+++ b/sys/contrib/openzfs/config/Rules.am
@@ -1,86 +1,71 @@
#
# Default build rules for all user space components, every Makefile.am
# should include these rules and override or extend them as needed.
#
PHONY =
AM_CPPFLAGS = \
-include $(top_builddir)/zfs_config.h \
-I$(top_builddir)/include \
-I$(top_srcdir)/include \
-I$(top_srcdir)/module/icp/include \
-I$(top_srcdir)/lib/libspl/include \
-I$(top_srcdir)/lib/libspl/include/os/@ac_system_l@
AM_LIBTOOLFLAGS = --silent
AM_CFLAGS = -std=gnu99 -Wall -Wextra -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wno-sign-compare -Wno-missing-field-initializers
AM_CFLAGS += -fno-strict-aliasing
AM_CFLAGS += $(NO_OMIT_FRAME_POINTER)
AM_CFLAGS += $(IMPLICIT_FALLTHROUGH)
AM_CFLAGS += $(DEBUG_CFLAGS)
AM_CFLAGS += $(ASAN_CFLAGS)
AM_CFLAGS += $(UBSAN_CFLAGS)
AM_CFLAGS += $(CODE_COVERAGE_CFLAGS) $(NO_FORMAT_ZERO_LENGTH)
if BUILD_FREEBSD
AM_CFLAGS += -fPIC -Werror -Wno-unknown-pragmas -Wno-enum-conversion
AM_CFLAGS += -include $(top_srcdir)/include/os/freebsd/spl/sys/ccompile.h
AM_CFLAGS += -I/usr/include -I/usr/local/include
endif
AM_CPPFLAGS += -D_GNU_SOURCE
AM_CPPFLAGS += -D_REENTRANT
AM_CPPFLAGS += -D_FILE_OFFSET_BITS=64
AM_CPPFLAGS += -D_LARGEFILE64_SOURCE
AM_CPPFLAGS += -DLIBEXECDIR=\"$(libexecdir)\"
AM_CPPFLAGS += -DZFSEXECDIR=\"$(zfsexecdir)\"
AM_CPPFLAGS += -DRUNSTATEDIR=\"$(runstatedir)\"
AM_CPPFLAGS += -DSBINDIR=\"$(sbindir)\"
AM_CPPFLAGS += -DSYSCONFDIR=\"$(sysconfdir)\"
AM_CPPFLAGS += -DPKGDATADIR=\"$(pkgdatadir)\"
AM_CPPFLAGS += $(DEBUG_CPPFLAGS)
AM_CPPFLAGS += $(CODE_COVERAGE_CPPFLAGS)
AM_CPPFLAGS += -DTEXT_DOMAIN=\"zfs-@ac_system_l@-user\"
-AM_CPPFLAGS_NOCHECK = -D"strtok(...)=strtok(__VA_ARGS__) __attribute__((deprecated(\"Use strtok_r(3) instead!\")))"
-AM_CPPFLAGS_NOCHECK += -D"__xpg_basename(...)=__xpg_basename(__VA_ARGS__) __attribute__((deprecated(\"basename(3) is underspecified. Use zfs_basename() instead!\")))"
-AM_CPPFLAGS_NOCHECK += -D"basename(...)=basename(__VA_ARGS__) __attribute__((deprecated(\"basename(3) is underspecified. Use zfs_basename() instead!\")))"
-AM_CPPFLAGS_NOCHECK += -D"dirname(...)=dirname(__VA_ARGS__) __attribute__((deprecated(\"dirname(3) is underspecified. Use zfs_dirnamelen() instead!\")))"
-AM_CPPFLAGS_NOCHECK += -D"bcopy(...)=__attribute__((deprecated(\"bcopy(3) is deprecated. Use memcpy(3)/memmove(3) instead!\"))) bcopy(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"bcmp(...)=__attribute__((deprecated(\"bcmp(3) is deprecated. Use memcmp(3) instead!\"))) bcmp(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"bzero(...)=__attribute__((deprecated(\"bzero(3) is deprecated. Use memset(3) instead!\"))) bzero(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"asctime(...)=__attribute__((deprecated(\"Use strftime(3) instead!\"))) asctime(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"asctime_r(...)=__attribute__((deprecated(\"Use strftime(3) instead!\"))) asctime_r(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"gmtime(...)=__attribute__((deprecated(\"gmtime(3) isn't thread-safe. Use gmtime_r(3) instead!\"))) gmtime(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"localtime(...)=__attribute__((deprecated(\"localtime(3) isn't thread-safe. Use localtime_r(3) instead!\"))) localtime(__VA_ARGS__)"
-AM_CPPFLAGS_NOCHECK += -D"strncpy(...)=__attribute__((deprecated(\"strncpy(3) is deprecated. Use strlcpy(3) instead!\"))) strncpy(__VA_ARGS__)"
-
-AM_CPPFLAGS += $(AM_CPPFLAGS_NOCHECK)
-
if ASAN_ENABLED
AM_CPPFLAGS += -DZFS_ASAN_ENABLED
endif
if UBSAN_ENABLED
AM_CPPFLAGS += -DZFS_UBSAN_ENABLED
endif
AM_LDFLAGS = $(DEBUG_LDFLAGS)
AM_LDFLAGS += $(ASAN_LDFLAGS)
AM_LDFLAGS += $(UBSAN_LDFLAGS)
if BUILD_FREEBSD
AM_LDFLAGS += -fstack-protector-strong -shared
AM_LDFLAGS += -Wl,-x -Wl,--fatal-warnings -Wl,--warn-shared-textrel
AM_LDFLAGS += -lm
endif
# If a target includes kernel code, generate warnings for large stack frames
KERNEL_CFLAGS = $(FRAME_LARGER_THAN)
# See https://debbugs.gnu.org/cgi/bugreport.cgi?bug=54020
LIBRARY_CFLAGS = -no-suppress
# Forcibly enable asserts/debugging for libzpool &al.
FORCEDEBUG_CPPFLAGS = -DDEBUG -UNDEBUG -DZFS_DEBUG
diff --git a/sys/contrib/openzfs/config/kernel-acl.m4 b/sys/contrib/openzfs/config/kernel-acl.m4
index be08c3c60724..3ae5dc6b6dbc 100644
--- a/sys/contrib/openzfs/config/kernel-acl.m4
+++ b/sys/contrib/openzfs/config/kernel-acl.m4
@@ -1,395 +1,395 @@
dnl #
dnl # Check if posix_acl_release can be used from a ZFS_META_LICENSED
dnl # module. The is_owner_or_cap macro was replaced by
dnl # inode_owner_or_capable
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_RELEASE], [
ZFS_LINUX_TEST_SRC([posix_acl_release], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
], [
struct posix_acl *tmp = posix_acl_alloc(1, 0);
posix_acl_release(tmp);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_RELEASE], [
AC_MSG_CHECKING([whether posix_acl_release() is available])
ZFS_LINUX_TEST_RESULT([posix_acl_release], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_RELEASE, 1,
[posix_acl_release() is available])
AC_MSG_CHECKING([whether posix_acl_release() is GPL-only])
ZFS_LINUX_TEST_RESULT([posix_acl_release_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_RELEASE_GPL_ONLY, 1,
[posix_acl_release() is GPL-only])
])
],[
AC_MSG_RESULT(no)
])
])
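dnl #
dnl # The resulting defines are consumed from C roughly as follows (a
dnl # sketch of the consumer side, not part of the check itself):
dnl #
dnl #	#if defined(HAVE_POSIX_ACL_RELEASE) && \
dnl #	    !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
dnl #		posix_acl_release(acl);
dnl #	#endif
dnl #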
dnl #
dnl # 3.14 API change,
dnl # set_cached_acl() and forget_cached_acl() changed from inline to
dnl # EXPORT_SYMBOL. In the former case, they may not be usable because of
dnl # posix_acl_release. In the latter case, we can always use them.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SET_CACHED_ACL_USABLE], [
ZFS_LINUX_TEST_SRC([set_cached_acl], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
], [
struct inode *ip = NULL;
struct posix_acl *acl = posix_acl_alloc(1, 0);
set_cached_acl(ip, ACL_TYPE_ACCESS, acl);
forget_cached_acl(ip, ACL_TYPE_ACCESS);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_SET_CACHED_ACL_USABLE], [
AC_MSG_CHECKING([whether set_cached_acl() is usable])
ZFS_LINUX_TEST_RESULT([set_cached_acl_license], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_CACHED_ACL_USABLE, 1,
[set_cached_acl() is usable])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.1 API change,
dnl # posix_acl_chmod() was added as the preferred interface.
dnl #
dnl # 3.14 API change,
dnl # posix_acl_chmod() was changed to __posix_acl_chmod()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_CHMOD], [
ZFS_LINUX_TEST_SRC([posix_acl_chmod], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
posix_acl_chmod(NULL, 0, 0)
])
ZFS_LINUX_TEST_SRC([__posix_acl_chmod], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
__posix_acl_chmod(NULL, 0, 0)
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_CHMOD], [
AC_MSG_CHECKING([whether __posix_acl_chmod exists])
ZFS_LINUX_TEST_RESULT([__posix_acl_chmod], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE___POSIX_ACL_CHMOD, 1,
[__posix_acl_chmod() exists])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether posix_acl_chmod exists])
ZFS_LINUX_TEST_RESULT([posix_acl_chmod], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_CHMOD, 1,
[posix_acl_chmod() exists])
],[
ZFS_LINUX_TEST_ERROR([posix_acl_chmod()])
])
])
])
dnl #
dnl # 3.1 API change,
dnl # posix_acl_equiv_mode now wants an umode_t instead of a mode_t
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T], [
ZFS_LINUX_TEST_SRC([posix_acl_equiv_mode], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
umode_t tmp;
posix_acl_equiv_mode(NULL, &tmp);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T], [
AC_MSG_CHECKING([whether posix_acl_equiv_mode() wants umode_t])
ZFS_LINUX_TEST_RESULT([posix_acl_equiv_mode], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([posix_acl_equiv_mode()])
])
])
dnl #
dnl # 4.8 API change,
dnl # The function posix_acl_valid now must be passed a namespace.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_VALID_WITH_NS], [
ZFS_LINUX_TEST_SRC([posix_acl_valid_with_ns], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
struct user_namespace *user_ns = NULL;
const struct posix_acl *acl = NULL;
int error;
error = posix_acl_valid(user_ns, acl);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_VALID_WITH_NS], [
AC_MSG_CHECKING([whether posix_acl_valid() wants user namespace])
ZFS_LINUX_TEST_RESULT([posix_acl_valid_with_ns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_VALID_WITH_NS, 1,
[posix_acl_valid() wants user namespace])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.1 API change,
dnl # Check if inode_operations contains the function get_acl
dnl #
dnl # 5.15 API change,
dnl # Added the bool rcu argument to get_acl for rcu path walk.
dnl #
dnl # 6.2 API change,
dnl # get_acl() was renamed to get_inode_acl()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_GET_ACL], [
ZFS_LINUX_TEST_SRC([inode_operations_get_acl], [
#include <linux/fs.h>
- struct posix_acl *get_acl_fn(struct inode *inode, int type)
+ static struct posix_acl *get_acl_fn(struct inode *inode, int type)
{ return NULL; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.get_acl = get_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_get_acl_rcu], [
#include <linux/fs.h>
- struct posix_acl *get_acl_fn(struct inode *inode, int type,
+ static struct posix_acl *get_acl_fn(struct inode *inode, int type,
bool rcu) { return NULL; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.get_acl = get_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_get_inode_acl], [
#include <linux/fs.h>
- struct posix_acl *get_inode_acl_fn(struct inode *inode, int type,
+ static struct posix_acl *get_inode_acl_fn(struct inode *inode, int type,
bool rcu) { return NULL; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.get_inode_acl = get_inode_acl_fn,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_GET_ACL], [
AC_MSG_CHECKING([whether iops->get_acl() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_get_acl], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_ACL, 1, [iops->get_acl() exists])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_get_acl_rcu], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_ACL_RCU, 1, [iops->get_acl() takes rcu])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_get_inode_acl], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_INODE_ACL, 1, [has iops->get_inode_acl()])
],[
ZFS_LINUX_TEST_ERROR([iops->get_acl() or iops->get_inode_acl()])
])
])
])
])
dnl #
dnl # 3.14 API change,
dnl # Check if inode_operations contains the function set_acl
dnl #
dnl # 5.12 API change,
dnl # set_acl() added a user_namespace* parameter first
dnl #
dnl # 6.2 API change,
dnl # set_acl() second parameter changed to a struct dentry *
dnl #
dnl # 6.3 API change,
dnl # set_acl() first parameter changed to struct mnt_idmap *
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_SET_ACL], [
ZFS_LINUX_TEST_SRC([inode_operations_set_acl_mnt_idmap_dentry], [
#include <linux/fs.h>
- int set_acl_fn(struct mnt_idmap *idmap,
+ static int set_acl_fn(struct mnt_idmap *idmap,
struct dentry *dent, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_set_acl_userns_dentry], [
#include <linux/fs.h>
- int set_acl_fn(struct user_namespace *userns,
+ static int set_acl_fn(struct user_namespace *userns,
struct dentry *dent, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_set_acl_userns], [
#include <linux/fs.h>
- int set_acl_fn(struct user_namespace *userns,
+ static int set_acl_fn(struct user_namespace *userns,
struct inode *inode, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_set_acl], [
#include <linux/fs.h>
- int set_acl_fn(struct inode *inode, struct posix_acl *acl,
+ static int set_acl_fn(struct inode *inode, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL], [
AC_MSG_CHECKING([whether iops->set_acl() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
AC_DEFINE(HAVE_SET_ACL_USERNS, 1, [iops->set_acl() takes 4 args])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_mnt_idmap_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
AC_DEFINE(HAVE_SET_ACL_IDMAP_DENTRY, 1,
[iops->set_acl() takes 4 args, arg1 is struct mnt_idmap *])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_userns_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
AC_DEFINE(HAVE_SET_ACL_USERNS_DENTRY_ARG2, 1,
[iops->set_acl() takes 4 args, arg2 is struct dentry *])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists, takes 3 args])
],[
ZFS_LINUX_REQUIRE_API([i_op->set_acl()], [3.14])
])
])
])
])
])
dnl #
dnl # 4.7 API change,
dnl # The kernel get_acl will now check the cache before calling
dnl # i_op->get_acl and call set_cached_acl after that, so i_op->get_acl
dnl # doesn't need to do that anymore.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GET_ACL_HANDLE_CACHE], [
ZFS_LINUX_TEST_SRC([get_acl_handle_cache], [
#include <linux/fs.h>
],[
void *sentinel __attribute__ ((unused)) =
uncached_acl_sentinel(NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE], [
AC_MSG_CHECKING([whether uncached_acl_sentinel() exists])
ZFS_LINUX_TEST_RESULT([get_acl_handle_cache], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KERNEL_GET_ACL_HANDLE_CACHE, 1,
[uncached_acl_sentinel() exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 4.16 kernel: check if struct posix_acl acl.a_refcount is a refcount_t.
dnl # It's an atomic_t on older kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_ACL_HAS_REFCOUNT], [
ZFS_LINUX_TEST_SRC([acl_refcount], [
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/posix_acl.h>
],[
struct posix_acl acl;
refcount_t *r __attribute__ ((unused)) = &acl.a_refcount;
])
])
AC_DEFUN([ZFS_AC_KERNEL_ACL_HAS_REFCOUNT], [
AC_MSG_CHECKING([whether posix_acl has refcount_t])
ZFS_LINUX_TEST_RESULT([acl_refcount], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ACL_REFCOUNT, 1, [posix_acl has refcount_t])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_ACL], [
ZFS_AC_KERNEL_SRC_POSIX_ACL_RELEASE
ZFS_AC_KERNEL_SRC_SET_CACHED_ACL_USABLE
ZFS_AC_KERNEL_SRC_POSIX_ACL_CHMOD
ZFS_AC_KERNEL_SRC_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T
ZFS_AC_KERNEL_SRC_POSIX_ACL_VALID_WITH_NS
ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_GET_ACL
ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_SRC_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_SRC_ACL_HAS_REFCOUNT
])
AC_DEFUN([ZFS_AC_KERNEL_ACL], [
ZFS_AC_KERNEL_POSIX_ACL_RELEASE
ZFS_AC_KERNEL_SET_CACHED_ACL_USABLE
ZFS_AC_KERNEL_POSIX_ACL_CHMOD
ZFS_AC_KERNEL_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T
ZFS_AC_KERNEL_POSIX_ACL_VALID_WITH_NS
ZFS_AC_KERNEL_INODE_OPERATIONS_GET_ACL
ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_ACL_HAS_REFCOUNT
])
diff --git a/sys/contrib/openzfs/config/kernel-automount.m4 b/sys/contrib/openzfs/config/kernel-automount.m4
index f7bb63c68154..52f1931b748e 100644
--- a/sys/contrib/openzfs/config/kernel-automount.m4
+++ b/sys/contrib/openzfs/config/kernel-automount.m4
@@ -1,25 +1,25 @@
dnl #
dnl # 2.6.37 API change
dnl # The dops->d_automount() dentry operation was added as a clean
dnl # solution to handling automounts. Prior to this, cifs/nfs clients
dnl # that required automount support would abuse the follow_link()
dnl # operation on directories for this purpose.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_AUTOMOUNT], [
ZFS_LINUX_TEST_SRC([dentry_operations_d_automount], [
#include <linux/dcache.h>
- struct vfsmount *d_automount(struct path *p) { return NULL; }
+ static struct vfsmount *d_automount(struct path *p) { return NULL; }
struct dentry_operations dops __attribute__ ((unused)) = {
.d_automount = d_automount,
};
])
])
AC_DEFUN([ZFS_AC_KERNEL_AUTOMOUNT], [
AC_MSG_CHECKING([whether dops->d_automount() exists])
ZFS_LINUX_TEST_RESULT([dentry_operations_d_automount], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([dops->d_automount()])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-bio.m4 b/sys/contrib/openzfs/config/kernel-bio.m4
index 18620ca5b7e4..b22c1a3de7e1 100644
--- a/sys/contrib/openzfs/config/kernel-bio.m4
+++ b/sys/contrib/openzfs/config/kernel-bio.m4
@@ -1,552 +1,552 @@
dnl #
dnl # 2.6.36 API change,
dnl # REQ_FAILFAST_{DEV|TRANSPORT|DRIVER}
dnl # REQ_DISCARD
dnl # REQ_FLUSH
dnl #
dnl # 4.8 - 4.9 API,
dnl # REQ_FLUSH was renamed to REQ_PREFLUSH
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_REQ], [
ZFS_LINUX_TEST_SRC([req_failfast_mask], [
#include <linux/bio.h>
],[
int flags __attribute__ ((unused));
flags = REQ_FAILFAST_MASK;
])
ZFS_LINUX_TEST_SRC([req_discard], [
#include <linux/bio.h>
],[
int flags __attribute__ ((unused));
flags = REQ_DISCARD;
])
ZFS_LINUX_TEST_SRC([req_flush], [
#include <linux/bio.h>
],[
int flags __attribute__ ((unused));
flags = REQ_FLUSH;
])
ZFS_LINUX_TEST_SRC([req_preflush], [
#include <linux/bio.h>
],[
int flags __attribute__ ((unused));
flags = REQ_PREFLUSH;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_FAILFAST_MASK], [
AC_MSG_CHECKING([whether REQ_FAILFAST_MASK is defined])
ZFS_LINUX_TEST_RESULT([req_failfast_mask], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([REQ_FAILFAST_MASK])
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_DISCARD], [
AC_MSG_CHECKING([whether REQ_DISCARD is defined])
ZFS_LINUX_TEST_RESULT([req_discard], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_DISCARD, 1, [REQ_DISCARD is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_FLUSH], [
AC_MSG_CHECKING([whether REQ_FLUSH is defined])
ZFS_LINUX_TEST_RESULT([req_flush], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_FLUSH, 1, [REQ_FLUSH is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_PREFLUSH], [
AC_MSG_CHECKING([whether REQ_PREFLUSH is defined])
ZFS_LINUX_TEST_RESULT([req_preflush], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_PREFLUSH, 1, [REQ_PREFLUSH is defined])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 4.8 API,
dnl #
dnl # The bio_op() helper was introduced as a replacement for explicitly
dnl # checking the bio->bi_rw flags. The following checks are used to
dnl # detect if a specific operation is supported.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_OPS], [
ZFS_LINUX_TEST_SRC([req_op_discard], [
#include <linux/blk_types.h>
],[
int op __attribute__ ((unused)) = REQ_OP_DISCARD;
])
ZFS_LINUX_TEST_SRC([req_op_secure_erase], [
#include <linux/blk_types.h>
],[
int op __attribute__ ((unused)) = REQ_OP_SECURE_ERASE;
])
ZFS_LINUX_TEST_SRC([req_op_flush], [
#include <linux/blk_types.h>
],[
int op __attribute__ ((unused)) = REQ_OP_FLUSH;
])
ZFS_LINUX_TEST_SRC([bio_bi_opf], [
#include <linux/bio.h>
],[
struct bio bio __attribute__ ((unused));
bio.bi_opf = 0;
])
ZFS_LINUX_TEST_SRC([bio_set_op_attrs], [
#include <linux/bio.h>
],[
struct bio *bio __attribute__ ((unused)) = NULL;
bio_set_op_attrs(bio, 0, 0);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_OP_DISCARD], [
AC_MSG_CHECKING([whether REQ_OP_DISCARD is defined])
ZFS_LINUX_TEST_RESULT([req_op_discard], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_OP_DISCARD, 1, [REQ_OP_DISCARD is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_OP_SECURE_ERASE], [
AC_MSG_CHECKING([whether REQ_OP_SECURE_ERASE is defined])
ZFS_LINUX_TEST_RESULT([req_op_secure_erase], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_OP_SECURE_ERASE, 1,
[REQ_OP_SECURE_ERASE is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_REQ_OP_FLUSH], [
AC_MSG_CHECKING([whether REQ_OP_FLUSH is defined])
ZFS_LINUX_TEST_RESULT([req_op_flush], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REQ_OP_FLUSH, 1, [REQ_OP_FLUSH is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_BI_OPF], [
AC_MSG_CHECKING([whether bio->bi_opf is defined])
ZFS_LINUX_TEST_RESULT([bio_bi_opf], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_BI_OPF, 1, [bio->bi_opf is defined])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_OP_ATTRS], [
AC_MSG_CHECKING([whether bio_set_op_attrs is available])
ZFS_LINUX_TEST_RESULT([bio_set_op_attrs], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_OP_ATTRS, 1,
[bio_set_op_attrs is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 4.14 API,
dnl #
dnl # The bio_set_dev() helper macro was introduced as part of the transition
dnl # to have struct gendisk in struct bio.
dnl #
dnl # Linux 5.0 API,
dnl #
dnl # The bio_set_dev() helper macro was updated to internally depend on
dnl # bio_associate_blkg() symbol which is exported GPL-only.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_SET_DEV], [
ZFS_LINUX_TEST_SRC([bio_set_dev], [
#include <linux/bio.h>
#include <linux/fs.h>
],[
struct block_device *bdev = NULL;
struct bio *bio = NULL;
bio_set_dev(bio, bdev);
], [], [ZFS_META_LICENSE])
])
dnl #
dnl # Linux 5.16 API
dnl #
dnl # bio_set_dev is no longer a helper macro and is now an inline function,
dnl # meaning that the function it calls internally can no longer be overridden
dnl # by our code
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_SET_DEV_MACRO], [
ZFS_LINUX_TEST_SRC([bio_set_dev_macro], [
#include <linux/bio.h>
#include <linux/fs.h>
],[
#ifndef bio_set_dev
#error Not a macro
#endif
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
AC_MSG_CHECKING([whether bio_set_dev() is available])
ZFS_LINUX_TEST_RESULT([bio_set_dev], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_DEV, 1, [bio_set_dev() is available])
AC_MSG_CHECKING([whether bio_set_dev() is GPL-only])
ZFS_LINUX_TEST_RESULT([bio_set_dev_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_DEV_GPL_ONLY, 1,
[bio_set_dev() GPL-only])
])
AC_MSG_CHECKING([whether bio_set_dev() is a macro])
ZFS_LINUX_TEST_RESULT([bio_set_dev_macro], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_DEV_MACRO, 1,
[bio_set_dev() is a macro])
],[
AC_MSG_RESULT(no)
])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 4.3 API change
dnl # Error argument dropped from bio_endio in favor of newly introduced
dnl # bio->bi_error. This also replaces bio->bi_flags value BIO_UPTODATE.
dnl # Introduced by torvalds/linux@4246a0b63bd8f56a1469b12eafeb875b1041a451
dnl # ("block: add a bi_error field to struct bio").
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_END_IO_T_ARGS], [
ZFS_LINUX_TEST_SRC([bio_end_io_t_args], [
#include <linux/bio.h>
- void wanted_end_io(struct bio *bio) { return; }
+ static void wanted_end_io(struct bio *bio) { return; }
bio_end_io_t *end_io __attribute__ ((unused)) = wanted_end_io;
], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_END_IO_T_ARGS], [
AC_MSG_CHECKING([whether bio_end_io_t wants 1 arg])
ZFS_LINUX_TEST_RESULT([bio_end_io_t_args], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_1ARG_BIO_END_IO_T, 1,
[bio_end_io_t wants 1 arg])
], [
AC_MSG_RESULT(no)
])
])
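dnl #
dnl # Sketch only: a completion callback is declared to match whichever
dnl # signature the check above found. vdev_end_io() is a hypothetical name.
dnl #
dnl #     #ifdef HAVE_1ARG_BIO_END_IO_T
dnl #     static void vdev_end_io(struct bio *bio)
dnl #     { /* error is read from the bio itself */ }
dnl #     #else
dnl #     static void vdev_end_io(struct bio *bio, int error)
dnl #     { /* error passed as a separate argument */ }
dnl #     #endif
dnl #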
dnl #
dnl # 4.13 API change
dnl # The bio->bi_error field was replaced with bio->bi_status which is an
dnl # enum which describes all possible error types.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_BI_STATUS], [
ZFS_LINUX_TEST_SRC([bio_bi_status], [
#include <linux/bio.h>
], [
struct bio bio __attribute__ ((unused));
blk_status_t status __attribute__ ((unused)) = BLK_STS_OK;
bio.bi_status = status;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_BI_STATUS], [
AC_MSG_CHECKING([whether bio->bi_status exists])
ZFS_LINUX_TEST_RESULT([bio_bi_status], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_BI_STATUS, 1, [bio->bi_status exists])
],[
AC_MSG_RESULT(no)
])
])
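dnl #
dnl # Sketch, assuming blk_status_to_errno() (present on kernels that have
dnl # bi_status): error extraction can be unified behind one hypothetical
dnl # helper.
dnl #
dnl #     static inline int
dnl #     bio_errno(struct bio *bio)
dnl #     {
dnl #     #ifdef HAVE_BIO_BI_STATUS
dnl #             return (blk_status_to_errno(bio->bi_status));
dnl #     #else
dnl #             return (bio->bi_error);
dnl #     #endif
dnl #     }
dnl #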
dnl #
dnl # 3.14 API change,
dnl # Immutable biovecs. A number of fields of struct bio are moved to
dnl # struct bvec_iter.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_BVEC_ITER], [
ZFS_LINUX_TEST_SRC([bio_bvec_iter], [
#include <linux/bio.h>
],[
struct bio bio;
bio.bi_iter.bi_sector = 0;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_BVEC_ITER], [
AC_MSG_CHECKING([whether bio has bi_iter])
ZFS_LINUX_TEST_RESULT([bio_bvec_iter], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_BVEC_ITER, 1, [bio has bi_iter])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 4.8 API change
dnl # The rw argument has been removed from submit_bio/submit_bio_wait.
dnl # Callers are now expected to set bio->bi_rw instead of passing it in.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_SUBMIT_BIO], [
ZFS_LINUX_TEST_SRC([submit_bio], [
#include <linux/bio.h>
],[
struct bio *bio = NULL;
(void) submit_bio(bio);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SUBMIT_BIO], [
AC_MSG_CHECKING([whether submit_bio() wants 1 arg])
ZFS_LINUX_TEST_RESULT([submit_bio], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_1ARG_SUBMIT_BIO, 1, [submit_bio() wants 1 arg])
],[
AC_MSG_RESULT(no)
])
])
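dnl #
dnl # Sketch only: on pre-4.8 kernels the request flags were passed as the
dnl # first argument instead of being carried in bio->bi_rw.
dnl #
dnl #     #ifdef HAVE_1ARG_SUBMIT_BIO
dnl #             (void) submit_bio(bio);
dnl #     #else
dnl #             (void) submit_bio(bio->bi_rw, bio);
dnl #     #endif
dnl #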
dnl #
dnl # 2.6.34 API change
dnl # current->bio_list
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_CURRENT_BIO_LIST], [
ZFS_LINUX_TEST_SRC([current_bio_list], [
#include <linux/sched.h>
], [
current->bio_list = (struct bio_list *) NULL;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_CURRENT_BIO_LIST], [
AC_MSG_CHECKING([whether current->bio_list exists])
ZFS_LINUX_TEST_RESULT([current_bio_list], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bio_list])
])
])
dnl #
dnl # Linux 5.5 API,
dnl #
dnl # The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
dnl # blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
dnl # As a side effect the function was converted to GPL-only.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKG_TRYGET], [
ZFS_LINUX_TEST_SRC([blkg_tryget], [
#include <linux/blk-cgroup.h>
#include <linux/bio.h>
#include <linux/fs.h>
],[
struct blkcg_gq blkg __attribute__ ((unused)) = {};
bool rc __attribute__ ((unused));
rc = blkg_tryget(&blkg);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKG_TRYGET], [
AC_MSG_CHECKING([whether blkg_tryget() is available])
ZFS_LINUX_TEST_RESULT([blkg_tryget], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKG_TRYGET, 1, [blkg_tryget() is available])
AC_MSG_CHECKING([whether blkg_tryget() is GPL-only])
ZFS_LINUX_TEST_RESULT([blkg_tryget_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKG_TRYGET_GPL_ONLY, 1,
[blkg_tryget() GPL-only])
])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.12 API,
dnl #
dnl # The Linux 5.12 kernel updated struct bio to create a new bi_bdev member
dnl # and bio->bi_disk was moved to bio->bi_bdev->bd_disk
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_BDEV_DISK], [
ZFS_LINUX_TEST_SRC([bio_bdev_disk], [
#include <linux/blk_types.h>
#include <linux/blkdev.h>
],[
struct bio *b = NULL;
struct gendisk *d = b->bi_bdev->bd_disk;
blk_register_queue(d);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_BDEV_DISK], [
AC_MSG_CHECKING([whether bio->bi_bdev->bd_disk exists])
ZFS_LINUX_TEST_RESULT([bio_bdev_disk], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_BDEV_DISK, 1, [bio->bi_bdev->bd_disk exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.16 API
dnl #
dnl # The Linux 5.16 API for submit_bio changed the return type to be
dnl # void instead of int
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BDEV_SUBMIT_BIO_RETURNS_VOID], [
ZFS_LINUX_TEST_SRC([bio_bdev_submit_bio_void], [
#include <linux/blkdev.h>
],[
struct block_device_operations *bdev = NULL;
__attribute__((unused)) void(*f)(struct bio *) = bdev->submit_bio;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BDEV_SUBMIT_BIO_RETURNS_VOID], [
AC_MSG_CHECKING(
[whether block_device_operations->submit_bio() returns void])
ZFS_LINUX_TEST_RESULT([bio_bdev_submit_bio_void], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID, 1,
[block_device_operations->submit_bio() returns void])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.16 API
dnl #
dnl # The Linux 5.16 API moved struct blkcg_gq into linux/blk-cgroup.h, which
dnl # has been around since 2015. This test looks for the presence of that
dnl # header, so that it can be conditionally included where it exists, but
dnl # still be backward compatible with kernels that pre-date its introduction.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_CGROUP_HEADER], [
ZFS_LINUX_TEST_SRC([blk_cgroup_header], [
#include <linux/blk-cgroup.h>
], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_CGROUP_HEADER], [
AC_MSG_CHECKING([whether linux/blk-cgroup.h exists])
ZFS_LINUX_TEST_RESULT([blk_cgroup_header],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_LINUX_BLK_CGROUP_HEADER, 1,
[linux/blk-cgroup.h exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.18 API
dnl #
dnl # In 07888c665b405b1cd3577ddebfeb74f4717a84c4 ("block: pass a block_device and opf to bio_alloc")
dnl # bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
dnl # became
dnl # bio_alloc(struct block_device *bdev, unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
dnl # however
dnl # > NULL/0 can be passed, both for the
dnl # > passthrough case on a raw request_queue and to temporarily avoid
dnl # > refactoring some nasty code.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_ALLOC_4ARG], [
ZFS_LINUX_TEST_SRC([bio_alloc_4arg], [
#include <linux/bio.h>
],[
gfp_t gfp_mask = 0;
unsigned short nr_iovecs = 0;
struct block_device *bdev = NULL;
unsigned int opf = 0;
struct bio *__attribute__((unused)) allocated = bio_alloc(bdev, nr_iovecs, opf, gfp_mask);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_ALLOC_4ARG], [
AC_MSG_CHECKING([whether bio_alloc() wants 4 args])
ZFS_LINUX_TEST_RESULT([bio_alloc_4arg],[
AC_MSG_RESULT(yes)
AC_DEFINE([HAVE_BIO_ALLOC_4ARG], 1, [bio_alloc() takes 4 arguments])
],[
AC_MSG_RESULT(no)
])
])
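dnl #
dnl # Sketch only: a wrapper keeps callers signature-agnostic. The name
dnl # vdev_bio_alloc() is hypothetical; passing opf 0 defers the op flags
dnl # to a later bi_opf assignment.
dnl #
dnl #     static inline struct bio *
dnl #     vdev_bio_alloc(struct block_device *bdev, gfp_t gfp,
dnl #         unsigned short nr_vecs)
dnl #     {
dnl #     #ifdef HAVE_BIO_ALLOC_4ARG
dnl #             return (bio_alloc(bdev, nr_vecs, 0, gfp));
dnl #     #else
dnl #             return (bio_alloc(gfp, nr_vecs));
dnl #     #endif
dnl #     }
dnl #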
AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO], [
ZFS_AC_KERNEL_SRC_REQ
ZFS_AC_KERNEL_SRC_BIO_OPS
ZFS_AC_KERNEL_SRC_BIO_SET_DEV
ZFS_AC_KERNEL_SRC_BIO_END_IO_T_ARGS
ZFS_AC_KERNEL_SRC_BIO_BI_STATUS
ZFS_AC_KERNEL_SRC_BIO_BVEC_ITER
ZFS_AC_KERNEL_SRC_BIO_SUBMIT_BIO
ZFS_AC_KERNEL_SRC_BIO_CURRENT_BIO_LIST
ZFS_AC_KERNEL_SRC_BLKG_TRYGET
ZFS_AC_KERNEL_SRC_BIO_BDEV_DISK
ZFS_AC_KERNEL_SRC_BDEV_SUBMIT_BIO_RETURNS_VOID
ZFS_AC_KERNEL_SRC_BIO_SET_DEV_MACRO
ZFS_AC_KERNEL_SRC_BLK_CGROUP_HEADER
ZFS_AC_KERNEL_SRC_BIO_ALLOC_4ARG
])
AC_DEFUN([ZFS_AC_KERNEL_BIO], [
ZFS_AC_KERNEL_BIO_REQ_FAILFAST_MASK
ZFS_AC_KERNEL_BIO_REQ_DISCARD
ZFS_AC_KERNEL_BIO_REQ_FLUSH
ZFS_AC_KERNEL_BIO_REQ_PREFLUSH
ZFS_AC_KERNEL_BIO_REQ_OP_DISCARD
ZFS_AC_KERNEL_BIO_REQ_OP_SECURE_ERASE
ZFS_AC_KERNEL_BIO_REQ_OP_FLUSH
ZFS_AC_KERNEL_BIO_BI_OPF
ZFS_AC_KERNEL_BIO_SET_OP_ATTRS
ZFS_AC_KERNEL_BIO_SET_DEV
ZFS_AC_KERNEL_BIO_END_IO_T_ARGS
ZFS_AC_KERNEL_BIO_BI_STATUS
ZFS_AC_KERNEL_BIO_BVEC_ITER
ZFS_AC_KERNEL_BIO_SUBMIT_BIO
ZFS_AC_KERNEL_BIO_CURRENT_BIO_LIST
ZFS_AC_KERNEL_BLKG_TRYGET
ZFS_AC_KERNEL_BIO_BDEV_DISK
ZFS_AC_KERNEL_BDEV_SUBMIT_BIO_RETURNS_VOID
ZFS_AC_KERNEL_BLK_CGROUP_HEADER
ZFS_AC_KERNEL_BIO_ALLOC_4ARG
])
diff --git a/sys/contrib/openzfs/config/kernel-blkdev.m4 b/sys/contrib/openzfs/config/kernel-blkdev.m4
index e04a2bd2c3b6..c5a353ca9203 100644
--- a/sys/contrib/openzfs/config/kernel-blkdev.m4
+++ b/sys/contrib/openzfs/config/kernel-blkdev.m4
@@ -1,611 +1,687 @@
dnl #
dnl # 2.6.38 API change,
dnl # Added blkdev_get_by_path()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH], [
ZFS_LINUX_TEST_SRC([blkdev_get_by_path], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused)) = NULL;
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
bdev = blkdev_get_by_path(path, mode, holder);
])
])
dnl #
dnl # 6.5.x API change,
dnl # blkdev_get_by_path() takes 4 args
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH_4ARG], [
ZFS_LINUX_TEST_SRC([blkdev_get_by_path_4arg], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused)) = NULL;
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
struct blk_holder_ops h;
bdev = blkdev_get_by_path(path, mode, holder, &h);
])
])
+dnl #
+dnl # 6.8.x API change
+dnl # bdev_open_by_path() replaces blkdev_get_by_path()
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_OPEN_BY_PATH], [
+ ZFS_LINUX_TEST_SRC([bdev_open_by_path], [
+ #include <linux/fs.h>
+ #include <linux/blkdev.h>
+ ], [
+ struct bdev_handle *bdh __attribute__ ((unused)) = NULL;
+ const char *path = "path";
+ fmode_t mode = 0;
+ void *holder = NULL;
+ struct blk_holder_ops h;
+
+ bdh = bdev_open_by_path(path, mode, holder, &h);
+ ])
+])
+
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_GET_BY_PATH], [
AC_MSG_CHECKING([whether blkdev_get_by_path() exists and takes 3 args])
ZFS_LINUX_TEST_RESULT([blkdev_get_by_path], [
AC_MSG_RESULT(yes)
], [
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether blkdev_get_by_path() exists and takes 4 args])
ZFS_LINUX_TEST_RESULT([blkdev_get_by_path_4arg], [
AC_DEFINE(HAVE_BLKDEV_GET_BY_PATH_4ARG, 1,
[blkdev_get_by_path() exists and takes 4 args])
AC_MSG_RESULT(yes)
], [
- ZFS_LINUX_TEST_ERROR([blkdev_get_by_path()])
+ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING([whether bdev_open_by_path() exists])
+ ZFS_LINUX_TEST_RESULT([bdev_open_by_path], [
+ AC_DEFINE(HAVE_BDEV_OPEN_BY_PATH, 1,
+ [bdev_open_by_path() exists])
+ AC_MSG_RESULT(yes)
+ ], [
+ ZFS_LINUX_TEST_ERROR([blkdev_get_by_path()])
+ ])
])
])
])
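dnl #
dnl # Sketch of the resulting open ladder as a consumer might write it;
dnl # variable names are hypothetical and error handling is elided.
dnl #
dnl #     #if defined(HAVE_BDEV_OPEN_BY_PATH)
dnl #             bdh = bdev_open_by_path(path, mode, holder, hops);
dnl #     #elif defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
dnl #             bdev = blkdev_get_by_path(path, mode, holder, hops);
dnl #     #else
dnl #             bdev = blkdev_get_by_path(path, mode, holder);
dnl #     #endif
dnl #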
dnl #
dnl # 6.5.x API change
dnl # blk_mode_t was added as a type to supersede fmode_t in some of the
dnl # places where it was used
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BLK_MODE_T], [
ZFS_LINUX_TEST_SRC([blk_mode_t], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
blk_mode_t m __attribute((unused)) = (blk_mode_t)0;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T], [
AC_MSG_CHECKING([whether blk_mode_t is defined])
ZFS_LINUX_TEST_RESULT([blk_mode_t], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_MODE_T, 1, [blk_mode_t is defined])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.38 API change,
dnl # Added blkdev_put()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PUT], [
ZFS_LINUX_TEST_SRC([blkdev_put], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
fmode_t mode = 0;
blkdev_put(bdev, mode);
])
])
dnl #
dnl # 6.5.x API change.
dnl # blkdev_put() takes (void* holder) as arg 2
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PUT_HOLDER], [
ZFS_LINUX_TEST_SRC([blkdev_put_holder], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
void *holder = NULL;
blkdev_put(bdev, holder);
])
])
+dnl #
+dnl # 6.8.x API change
+dnl # bdev_release() replaces blkdev_put()
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_RELEASE], [
+ ZFS_LINUX_TEST_SRC([bdev_release], [
+ #include <linux/fs.h>
+ #include <linux/blkdev.h>
+ ], [
+ struct bdev_handle *bdh = NULL;
+ bdev_release(bdh);
+ ])
+])
+
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_PUT], [
AC_MSG_CHECKING([whether blkdev_put() exists])
ZFS_LINUX_TEST_RESULT([blkdev_put], [
AC_MSG_RESULT(yes)
], [
+ AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether blkdev_put() accepts void* as arg 2])
ZFS_LINUX_TEST_RESULT([blkdev_put_holder], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_PUT_HOLDER, 1,
[blkdev_put() accepts void* as arg 2])
], [
- ZFS_LINUX_TEST_ERROR([blkdev_put()])
+ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING([whether bdev_release() exists])
+ ZFS_LINUX_TEST_RESULT([bdev_release], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BDEV_RELEASE, 1,
+ [bdev_release() exists])
+ ], [
+ ZFS_LINUX_TEST_ERROR([blkdev_put()])
+ ])
])
])
])
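dnl #
dnl # Matching close ladder, sketched under the same assumptions as the
dnl # open ladder above:
dnl #
dnl #     #if defined(HAVE_BDEV_RELEASE)
dnl #             bdev_release(bdh);
dnl #     #elif defined(HAVE_BLKDEV_PUT_HOLDER)
dnl #             blkdev_put(bdev, holder);
dnl #     #else
dnl #             blkdev_put(bdev, mode);
dnl #     #endif
dnl #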
dnl #
dnl # 4.1 API, exported blkdev_reread_part() symbol, back ported to the
dnl # 3.10.0 CentOS 7.x enterprise kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART], [
ZFS_LINUX_TEST_SRC([blkdev_reread_part], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;
error = blkdev_reread_part(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_REREAD_PART], [
AC_MSG_CHECKING([whether blkdev_reread_part() exists])
ZFS_LINUX_TEST_RESULT([blkdev_reread_part], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_REREAD_PART, 1,
[blkdev_reread_part() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # check_disk_change() was removed in 5.10
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE], [
ZFS_LINUX_TEST_SRC([check_disk_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
bool error;
error = check_disk_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE], [
AC_MSG_CHECKING([whether check_disk_change() exists])
ZFS_LINUX_TEST_RESULT([check_disk_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CHECK_DISK_CHANGE, 1,
[check_disk_change() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 6.5.x API change
dnl # disk_check_media_change() was added
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE], [
ZFS_LINUX_TEST_SRC([disk_check_media_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
bool error;
error = disk_check_media_change(bdev->bd_disk);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE], [
AC_MSG_CHECKING([whether disk_check_media_change() exists])
ZFS_LINUX_TEST_RESULT([disk_check_media_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_DISK_CHECK_MEDIA_CHANGE, 1,
[disk_check_media_change() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # bdev_kobj() is introduced from 5.12
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ], [
ZFS_LINUX_TEST_SRC([bdev_kobj], [
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/kobject.h>
], [
struct block_device *bdev = NULL;
struct kobject *disk_kobj;
disk_kobj = bdev_kobj(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_KOBJ], [
AC_MSG_CHECKING([whether bdev_kobj() exists])
ZFS_LINUX_TEST_RESULT([bdev_kobj], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_KOBJ, 1,
[bdev_kobj() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # part_to_dev() was removed in 5.12
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_PART_TO_DEV], [
ZFS_LINUX_TEST_SRC([part_to_dev], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct hd_struct *p = NULL;
struct device *pdev;
pdev = part_to_dev(p);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_PART_TO_DEV], [
AC_MSG_CHECKING([whether part_to_dev() exists])
ZFS_LINUX_TEST_RESULT([part_to_dev], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PART_TO_DEV, 1,
[part_to_dev() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.10 API, check_disk_change() is removed, in favor of
dnl # bdev_check_media_change(), which doesn't force revalidation
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
ZFS_LINUX_TEST_SRC([bdev_check_media_change], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev = NULL;
int error;
error = bdev_check_media_change(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
AC_MSG_CHECKING([whether bdev_check_media_change() exists])
ZFS_LINUX_TEST_RESULT([bdev_check_media_change], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_CHECK_MEDIA_CHANGE, 1,
[bdev_check_media_change() exists])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.22 API change
dnl # Single argument invalidate_bdev()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_INVALIDATE_BDEV], [
ZFS_LINUX_TEST_SRC([invalidate_bdev], [
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
invalidate_bdev(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_INVALIDATE_BDEV], [
AC_MSG_CHECKING([whether invalidate_bdev() exists])
ZFS_LINUX_TEST_RESULT([invalidate_bdev], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([invalidate_bdev()])
])
])
dnl #
dnl # 5.11 API, lookup_bdev() takes dev_t argument.
dnl # 2.6.27 API, lookup_bdev() was first exported.
dnl # 4.4.0-6.21 API, lookup_bdev() on Ubuntu takes mode argument.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV], [
ZFS_LINUX_TEST_SRC([lookup_bdev_devt], [
#include <linux/blkdev.h>
], [
int error __attribute__ ((unused));
const char path[] = "/example/path";
dev_t dev;
error = lookup_bdev(path, &dev);
])
ZFS_LINUX_TEST_SRC([lookup_bdev_1arg], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused));
const char path[] = "/example/path";
bdev = lookup_bdev(path);
])
ZFS_LINUX_TEST_SRC([lookup_bdev_mode], [
#include <linux/fs.h>
], [
struct block_device *bdev __attribute__ ((unused));
const char path[] = "/example/path";
bdev = lookup_bdev(path, FMODE_READ);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV], [
AC_MSG_CHECKING([whether lookup_bdev() wants dev_t arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_devt],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_DEVT_LOOKUP_BDEV, 1,
[lookup_bdev() wants dev_t arg])
], [
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether lookup_bdev() wants 1 arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_1arg],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_1ARG_LOOKUP_BDEV, 1,
[lookup_bdev() wants 1 arg])
], [
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether lookup_bdev() wants mode arg])
ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_mode],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MODE_LOOKUP_BDEV, 1,
[lookup_bdev() wants mode arg])
], [
ZFS_LINUX_TEST_ERROR([lookup_bdev()])
])
])
])
])
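dnl #
dnl # Sketch of a caller normalizing the three variants to an errno; the
dnl # pointer-returning forms use ERR_PTR encoding, hence IS_ERR()/PTR_ERR().
dnl #
dnl #     #if defined(HAVE_DEVT_LOOKUP_BDEV)
dnl #             error = lookup_bdev(path, &dev);
dnl #     #elif defined(HAVE_1ARG_LOOKUP_BDEV)
dnl #             bdev = lookup_bdev(path);
dnl #             error = IS_ERR(bdev) ? PTR_ERR(bdev) : 0;
dnl #     #elif defined(HAVE_MODE_LOOKUP_BDEV)
dnl #             bdev = lookup_bdev(path, FMODE_READ);
dnl #             error = IS_ERR(bdev) ? PTR_ERR(bdev) : 0;
dnl #     #endif
dnl #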
dnl #
dnl # 2.6.30 API change
dnl #
dnl # The bdev_physical_block_size() interface was added to provide a way
dnl # to determine the smallest write which can be performed without a
dnl # read-modify-write operation.
dnl #
dnl # Unfortunately, this interface isn't entirely reliable because
dnl # drives are sometimes known to misreport this value.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE], [
ZFS_LINUX_TEST_SRC([bdev_physical_block_size], [
#include <linux/blkdev.h>
],[
struct block_device *bdev __attribute__ ((unused)) = NULL;
bdev_physical_block_size(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE], [
AC_MSG_CHECKING([whether bdev_physical_block_size() is available])
ZFS_LINUX_TEST_RESULT([bdev_physical_block_size], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bdev_physical_block_size()])
])
])
dnl #
dnl # 2.6.30 API change
dnl # Added bdev_logical_block_size().
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE], [
ZFS_LINUX_TEST_SRC([bdev_logical_block_size], [
#include <linux/blkdev.h>
],[
struct block_device *bdev __attribute__ ((unused)) = NULL;
bdev_logical_block_size(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE], [
AC_MSG_CHECKING([whether bdev_logical_block_size() is available])
ZFS_LINUX_TEST_RESULT([bdev_logical_block_size], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bdev_logical_block_size()])
])
])
dnl #
dnl # 5.11 API change
dnl # Added bdev_whole() helper.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE], [
ZFS_LINUX_TEST_SRC([bdev_whole], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
bdev = bdev_whole(bdev);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE], [
AC_MSG_CHECKING([whether bdev_whole() is available])
ZFS_LINUX_TEST_RESULT([bdev_whole], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BDEV_WHOLE, 1, [bdev_whole() is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.20 API change,
dnl # Removed bdevname(); snprintf(..., %pg) should be used instead.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME], [
ZFS_LINUX_TEST_SRC([bdevname], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
struct block_device *bdev __attribute__ ((unused)) = NULL;
char path[BDEVNAME_SIZE];
(void) bdevname(bdev, path);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEVNAME], [
AC_MSG_CHECKING([whether bdevname() exists])
ZFS_LINUX_TEST_RESULT([bdevname], [
AC_DEFINE(HAVE_BDEVNAME, 1, [bdevname() is available])
AC_MSG_RESULT(yes)
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.19 API: blkdev_issue_secure_erase()
+dnl # 4.7 API: __blkdev_issue_discard(..., BLKDEV_DISCARD_SECURE)
dnl # 3.10 API: blkdev_issue_discard(..., BLKDEV_DISCARD_SECURE)
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_SECURE_ERASE], [
ZFS_LINUX_TEST_SRC([blkdev_issue_secure_erase], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
sector_t sector = 0;
sector_t nr_sects = 0;
int error __attribute__ ((unused));
error = blkdev_issue_secure_erase(bdev,
sector, nr_sects, GFP_KERNEL);
])
+ ZFS_LINUX_TEST_SRC([blkdev_issue_discard_async_flags], [
+ #include <linux/blkdev.h>
+ ],[
+ struct block_device *bdev = NULL;
+ sector_t sector = 0;
+ sector_t nr_sects = 0;
+ unsigned long flags = 0;
+ struct bio *biop = NULL;
+ int error __attribute__ ((unused));
+
+ error = __blkdev_issue_discard(bdev,
+ sector, nr_sects, GFP_KERNEL, flags, &biop);
+ ])
+
ZFS_LINUX_TEST_SRC([blkdev_issue_discard_flags], [
#include <linux/blkdev.h>
],[
struct block_device *bdev = NULL;
sector_t sector = 0;
sector_t nr_sects = 0;
unsigned long flags = 0;
int error __attribute__ ((unused));
error = blkdev_issue_discard(bdev,
sector, nr_sects, GFP_KERNEL, flags);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_ISSUE_SECURE_ERASE], [
AC_MSG_CHECKING([whether blkdev_issue_secure_erase() is available])
ZFS_LINUX_TEST_RESULT([blkdev_issue_secure_erase], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_ISSUE_SECURE_ERASE, 1,
[blkdev_issue_secure_erase() is available])
],[
AC_MSG_RESULT(no)
- AC_MSG_CHECKING([whether blkdev_issue_discard() is available])
- ZFS_LINUX_TEST_RESULT([blkdev_issue_discard_flags], [
+ AC_MSG_CHECKING([whether __blkdev_issue_discard() is available])
+ ZFS_LINUX_TEST_RESULT([blkdev_issue_discard_async_flags], [
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_BLKDEV_ISSUE_DISCARD, 1,
- [blkdev_issue_discard() is available])
+ AC_DEFINE(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC, 1,
+ [__blkdev_issue_discard() is available])
],[
- ZFS_LINUX_TEST_ERROR([blkdev_issue_discard()])
+ AC_MSG_RESULT(no)
+
+ AC_MSG_CHECKING([whether blkdev_issue_discard() is available])
+ ZFS_LINUX_TEST_RESULT([blkdev_issue_discard_flags], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLKDEV_ISSUE_DISCARD, 1,
+ [blkdev_issue_discard() is available])
+ ],[
+ ZFS_LINUX_TEST_ERROR([blkdev_issue_discard()])
+ ])
])
])
])
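dnl #
dnl # Sketch of the resulting dispatch for a secure TRIM request; the
dnl # submission of biop in the async branch is elided.
dnl #
dnl #     #if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
dnl #             error = blkdev_issue_secure_erase(bdev, sector, nr_sects,
dnl #                 GFP_KERNEL);
dnl #     #elif defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
dnl #             error = __blkdev_issue_discard(bdev, sector, nr_sects,
dnl #                 GFP_KERNEL, BLKDEV_DISCARD_SECURE, &biop);
dnl #     #else
dnl #             error = blkdev_issue_discard(bdev, sector, nr_sects,
dnl #                 GFP_KERNEL, BLKDEV_DISCARD_SECURE);
dnl #     #endif
dnl #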
dnl #
dnl # 5.13 API change
dnl # blkdev_get_by_path() no longer handles ERESTARTSYS
dnl #
dnl # Unfortunately we're forced to rely solely on the kernel version
dnl # number in order to determine the expected behavior. This was an
dnl # internal change to blkdev_get_by_dev(), see commit a8ed1a0607.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS], [
AC_MSG_CHECKING([whether blkdev_get_by_path() handles ERESTARTSYS])
AS_VERSION_COMPARE([$LINUX_VERSION], [5.13.0], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLKDEV_GET_ERESTARTSYS, 1,
[blkdev_get_by_path() handles ERESTARTSYS])
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(no)
])
])
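dnl #
dnl # For reference, AS_VERSION_COMPARE() takes three action branches:
dnl #
dnl #     AS_VERSION_COMPARE([version-1], [version-2],
dnl #         [action-if-less], [action-if-equal], [action-if-greater])
dnl #
dnl # so the check above defines HAVE_BLKDEV_GET_ERESTARTSYS only when
dnl # $LINUX_VERSION sorts strictly below 5.13.0.
dnl #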
dnl #
dnl # 6.5.x API change
dnl # BLK_STS_NEXUS replaced with BLK_STS_RESV_CONFLICT
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT], [
ZFS_LINUX_TEST_SRC([blk_sts_resv_conflict], [
#include <linux/blkdev.h>
],[
blk_status_t s __attribute__ ((unused)) = BLK_STS_RESV_CONFLICT;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT], [
AC_MSG_CHECKING([whether BLK_STS_RESV_CONFLICT is defined])
ZFS_LINUX_TEST_RESULT([blk_sts_resv_conflict], [
AC_DEFINE(HAVE_BLK_STS_RESV_CONFLICT, 1, [BLK_STS_RESV_CONFLICT is defined])
AC_MSG_RESULT(yes)
], [
AC_MSG_RESULT(no)
])
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH
ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH_4ARG
+ ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_OPEN_BY_PATH
ZFS_AC_KERNEL_SRC_BLKDEV_PUT
ZFS_AC_KERNEL_SRC_BLKDEV_PUT_HOLDER
+ ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_RELEASE
ZFS_AC_KERNEL_SRC_BLKDEV_REREAD_PART
ZFS_AC_KERNEL_SRC_BLKDEV_INVALIDATE_BDEV
ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_SRC_BLKDEV_ISSUE_SECURE_ERASE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_KOBJ
ZFS_AC_KERNEL_SRC_BLKDEV_PART_TO_DEV
ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT
ZFS_AC_KERNEL_SRC_BLKDEV_BLK_MODE_T
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_GET_BY_PATH
ZFS_AC_KERNEL_BLKDEV_PUT
ZFS_AC_KERNEL_BLKDEV_REREAD_PART
ZFS_AC_KERNEL_BLKDEV_INVALIDATE_BDEV
ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE
ZFS_AC_KERNEL_BLKDEV_BDEVNAME
ZFS_AC_KERNEL_BLKDEV_GET_ERESTARTSYS
ZFS_AC_KERNEL_BLKDEV_ISSUE_SECURE_ERASE
ZFS_AC_KERNEL_BLKDEV_BDEV_KOBJ
ZFS_AC_KERNEL_BLKDEV_PART_TO_DEV
ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT
ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T
])
diff --git a/sys/contrib/openzfs/config/kernel-block-device-operations.m4 b/sys/contrib/openzfs/config/kernel-block-device-operations.m4
index d13c1337b1fb..4ff20b9c413d 100644
--- a/sys/contrib/openzfs/config/kernel-block-device-operations.m4
+++ b/sys/contrib/openzfs/config/kernel-block-device-operations.m4
@@ -1,133 +1,133 @@
dnl #
dnl # 2.6.38 API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS], [
ZFS_LINUX_TEST_SRC([block_device_operations_check_events], [
#include <linux/blkdev.h>
- unsigned int blk_check_events(struct gendisk *disk,
+ static unsigned int blk_check_events(struct gendisk *disk,
unsigned int clearing) {
(void) disk, (void) clearing;
return (0);
}
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.check_events = blk_check_events,
};
], [], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS], [
AC_MSG_CHECKING([whether bops->check_events() exists])
ZFS_LINUX_TEST_RESULT([block_device_operations_check_events], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([bops->check_events()])
])
])
dnl #
dnl # 3.10.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
ZFS_LINUX_TEST_SRC([block_device_operations_release_void], [
#include <linux/blkdev.h>
- void blk_release(struct gendisk *g, fmode_t mode) {
+ static void blk_release(struct gendisk *g, fmode_t mode) {
(void) g, (void) mode;
return;
}
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.open = NULL,
.release = blk_release,
.ioctl = NULL,
.compat_ioctl = NULL,
};
], [], [])
])
dnl #
dnl # 5.9.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG], [
ZFS_LINUX_TEST_SRC([block_device_operations_release_void_1arg], [
#include <linux/blkdev.h>
- void blk_release(struct gendisk *g) {
+ static void blk_release(struct gendisk *g) {
(void) g;
return;
}
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.open = NULL,
.release = blk_release,
.ioctl = NULL,
.compat_ioctl = NULL,
};
], [], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
AC_MSG_CHECKING([whether bops->release() is void and takes 2 args])
ZFS_LINUX_TEST_RESULT([block_device_operations_release_void], [
AC_MSG_RESULT(yes)
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether bops->release() is void and takes 1 arg])
ZFS_LINUX_TEST_RESULT([block_device_operations_release_void_1arg], [
AC_MSG_RESULT(yes)
AC_DEFINE([HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG], [1],
[Define if release() in block_device_operations takes 1 arg])
],[
ZFS_LINUX_TEST_ERROR([bops->release()])
])
])
])
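dnl #
dnl # Sketch only: the driver declares its release handler against the
dnl # detected signature. zvol_release() is used here as a hypothetical name.
dnl #
dnl #     #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
dnl #     static void zvol_release(struct gendisk *disk)
dnl #     #else
dnl #     static void zvol_release(struct gendisk *disk, fmode_t mode)
dnl #     #endif
dnl #     { /* teardown */ }
dnl #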
dnl #
dnl # 5.13 API change
dnl # block_device_operations->revalidate_disk() was removed
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK], [
ZFS_LINUX_TEST_SRC([block_device_operations_revalidate_disk], [
#include <linux/blkdev.h>
- int blk_revalidate_disk(struct gendisk *disk) {
+ static int blk_revalidate_disk(struct gendisk *disk) {
(void) disk;
return(0);
}
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.revalidate_disk = blk_revalidate_disk,
};
], [], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK], [
AC_MSG_CHECKING([whether bops->revalidate_disk() exists])
ZFS_LINUX_TEST_RESULT([block_device_operations_revalidate_disk], [
AC_DEFINE([HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK], [1],
[Define if revalidate_disk() in block_device_operations])
AC_MSG_RESULT(yes)
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS], [
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
])
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS], [
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
])
diff --git a/sys/contrib/openzfs/config/kernel-commit-metadata.m4 b/sys/contrib/openzfs/config/kernel-commit-metadata.m4
index 7df9b980290e..49bffbf609d2 100644
--- a/sys/contrib/openzfs/config/kernel-commit-metadata.m4
+++ b/sys/contrib/openzfs/config/kernel-commit-metadata.m4
@@ -1,24 +1,24 @@
dnl #
dnl # 2.6.33 API change
dnl # Added eops->commit_metadata() callback to allow the underlying
dnl # filesystem to determine the most efficient way to commit the inode.
dnl # Prior to this the nfs server would issue an explicit fsync().
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_COMMIT_METADATA], [
ZFS_LINUX_TEST_SRC([export_operations_commit_metadata], [
#include <linux/exportfs.h>
- int commit_metadata(struct inode *inode) { return 0; }
+ static int commit_metadata(struct inode *inode) { return 0; }
static struct export_operations eops __attribute__ ((unused))={
.commit_metadata = commit_metadata,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_COMMIT_METADATA], [
AC_MSG_CHECKING([whether eops->commit_metadata() exists])
ZFS_LINUX_TEST_RESULT([export_operations_commit_metadata], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([eops->commit_metadata()])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-current-time.m4 b/sys/contrib/openzfs/config/kernel-current-time.m4
index 3ceb5f63efa9..ab7d9c5cedba 100644
--- a/sys/contrib/openzfs/config/kernel-current-time.m4
+++ b/sys/contrib/openzfs/config/kernel-current-time.m4
@@ -1,23 +1,26 @@
dnl #
dnl # 4.9, current_time() added
dnl # 4.18, return type changed from timespec to timespec64
dnl #
+dnl # Note that we don't care about the return type in this check. If we have
+dnl # to implement a fallback, we'll know we're <4.9, which was timespec.
+dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_CURRENT_TIME], [
ZFS_LINUX_TEST_SRC([current_time], [
#include <linux/fs.h>
], [
struct inode ip __attribute__ ((unused));
- ip.i_atime = current_time(&ip);
+ (void) current_time(&ip);
])
])
AC_DEFUN([ZFS_AC_KERNEL_CURRENT_TIME], [
AC_MSG_CHECKING([whether current_time() exists])
ZFS_LINUX_TEST_RESULT_SYMBOL([current_time],
[current_time], [fs/inode.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CURRENT_TIME, 1, [current_time() exists])
], [
AC_MSG_RESULT(no)
])
])
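dnl #
dnl # Minimal fallback sketch for kernels without current_time(); it assumes
dnl # the pre-4.9 timespec-based helpers current_kernel_time() and
dnl # timespec_trunc() are available there.
dnl #
dnl #     #ifndef HAVE_CURRENT_TIME
dnl #     static inline struct timespec
dnl #     current_time(struct inode *ip)
dnl #     {
dnl #             return (timespec_trunc(current_kernel_time(),
dnl #                 ip->i_sb->s_time_gran));
dnl #     }
dnl #     #endif
dnl #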
diff --git a/sys/contrib/openzfs/config/kernel-dentry-operations.m4 b/sys/contrib/openzfs/config/kernel-dentry-operations.m4
index dd470d7607b4..500f61e26aee 100644
--- a/sys/contrib/openzfs/config/kernel-dentry-operations.m4
+++ b/sys/contrib/openzfs/config/kernel-dentry-operations.m4
@@ -1,190 +1,190 @@
dnl #
dnl # 3.4.0 API change
dnl # Added d_make_root() to replace previous d_alloc_root() function.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_MAKE_ROOT], [
ZFS_LINUX_TEST_SRC([d_make_root], [
#include <linux/dcache.h>
], [
d_make_root(NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_D_MAKE_ROOT], [
AC_MSG_CHECKING([whether d_make_root() is available])
ZFS_LINUX_TEST_RESULT_SYMBOL([d_make_root],
[d_make_root], [fs/dcache.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_D_MAKE_ROOT, 1, [d_make_root() is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.28 API change
dnl # Added d_obtain_alias() helper function.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_OBTAIN_ALIAS], [
ZFS_LINUX_TEST_SRC([d_obtain_alias], [
#include <linux/dcache.h>
], [
d_obtain_alias(NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_D_OBTAIN_ALIAS], [
AC_MSG_CHECKING([whether d_obtain_alias() is available])
ZFS_LINUX_TEST_RESULT_SYMBOL([d_obtain_alias],
[d_obtain_alias], [fs/dcache.c], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([d_obtain_alias()])
])
])
dnl #
dnl # 2.6.12 API change
dnl # d_prune_aliases() helper function available.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_PRUNE_ALIASES], [
ZFS_LINUX_TEST_SRC([d_prune_aliases], [
#include <linux/dcache.h>
], [
struct inode *ip = NULL;
d_prune_aliases(ip);
])
])
AC_DEFUN([ZFS_AC_KERNEL_D_PRUNE_ALIASES], [
AC_MSG_CHECKING([whether d_prune_aliases() is available])
ZFS_LINUX_TEST_RESULT_SYMBOL([d_prune_aliases],
[d_prune_aliases], [fs/dcache.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_D_PRUNE_ALIASES, 1,
[d_prune_aliases() is available])
], [
ZFS_LINUX_TEST_ERROR([d_prune_aliases()])
])
])
dnl #
dnl # 2.6.38 API change
dnl # Added d_set_d_op() helper function.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_SET_D_OP], [
ZFS_LINUX_TEST_SRC([d_set_d_op], [
#include <linux/dcache.h>
], [
d_set_d_op(NULL, NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_D_SET_D_OP], [
AC_MSG_CHECKING([whether d_set_d_op() is available])
ZFS_LINUX_TEST_RESULT_SYMBOL([d_set_d_op],
[d_set_d_op], [fs/dcache.c], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([d_set_d_op])
])
])
dnl #
dnl # 3.6 API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_REVALIDATE_NAMEIDATA], [
ZFS_LINUX_TEST_SRC([dentry_operations_revalidate], [
#include <linux/dcache.h>
#include <linux/sched.h>
- int revalidate (struct dentry *dentry,
+ static int revalidate (struct dentry *dentry,
struct nameidata *nidata) { return 0; }
static const struct dentry_operations
dops __attribute__ ((unused)) = {
.d_revalidate = revalidate,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_NAMEIDATA], [
AC_MSG_CHECKING([whether dops->d_revalidate() takes struct nameidata])
ZFS_LINUX_TEST_RESULT([dentry_operations_revalidate], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_D_REVALIDATE_NAMEIDATA, 1,
[dops->d_revalidate() operation takes nameidata])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.30 API change
dnl # The 'struct dentry_operations' was constified in the dentry structure.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_CONST_DENTRY_OPERATIONS], [
ZFS_LINUX_TEST_SRC([dentry_operations_const], [
#include <linux/dcache.h>
const struct dentry_operations test_d_op = {
.d_revalidate = NULL,
};
],[
struct dentry d __attribute__ ((unused));
d.d_op = &test_d_op;
])
])
AC_DEFUN([ZFS_AC_KERNEL_CONST_DENTRY_OPERATIONS], [
AC_MSG_CHECKING([whether dentry uses const struct dentry_operations])
ZFS_LINUX_TEST_RESULT([dentry_operations_const], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CONST_DENTRY_OPERATIONS, 1,
[dentry uses const struct dentry_operations])
],[
ZFS_LINUX_TEST_ERROR([const dentry_operations])
])
])
dnl #
dnl # 2.6.38 API change
dnl # Added sb->s_d_op default dentry_operations member
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_S_D_OP], [
ZFS_LINUX_TEST_SRC([super_block_s_d_op], [
#include <linux/fs.h>
],[
struct super_block sb __attribute__ ((unused));
sb.s_d_op = NULL;
])
])
AC_DEFUN([ZFS_AC_KERNEL_S_D_OP], [
AC_MSG_CHECKING([whether super_block has s_d_op])
ZFS_LINUX_TEST_RESULT([super_block_s_d_op], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([super_block s_d_op])
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_DENTRY], [
ZFS_AC_KERNEL_SRC_D_MAKE_ROOT
ZFS_AC_KERNEL_SRC_D_OBTAIN_ALIAS
ZFS_AC_KERNEL_SRC_D_PRUNE_ALIASES
ZFS_AC_KERNEL_SRC_D_SET_D_OP
ZFS_AC_KERNEL_SRC_D_REVALIDATE_NAMEIDATA
ZFS_AC_KERNEL_SRC_CONST_DENTRY_OPERATIONS
ZFS_AC_KERNEL_SRC_S_D_OP
])
AC_DEFUN([ZFS_AC_KERNEL_DENTRY], [
ZFS_AC_KERNEL_D_MAKE_ROOT
ZFS_AC_KERNEL_D_OBTAIN_ALIAS
ZFS_AC_KERNEL_D_PRUNE_ALIASES
ZFS_AC_KERNEL_D_SET_D_OP
ZFS_AC_KERNEL_D_REVALIDATE_NAMEIDATA
ZFS_AC_KERNEL_CONST_DENTRY_OPERATIONS
ZFS_AC_KERNEL_S_D_OP
])
diff --git a/sys/contrib/openzfs/config/kernel-dirty-inode.m4 b/sys/contrib/openzfs/config/kernel-dirty-inode.m4
index dc7667fa4881..2ef8658748ca 100644
--- a/sys/contrib/openzfs/config/kernel-dirty-inode.m4
+++ b/sys/contrib/openzfs/config/kernel-dirty-inode.m4
@@ -1,29 +1,29 @@
dnl #
dnl # 3.0 API change
dnl # The sops->dirty_inode() callbacks were updated to take a flags
dnl # argument. This allows greater control over whether the
dnl # filesystem needs to push out a transaction or not.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_DIRTY_INODE], [
ZFS_LINUX_TEST_SRC([dirty_inode_with_flags], [
#include <linux/fs.h>
- void dirty_inode(struct inode *a, int b) { return; }
+ static void dirty_inode(struct inode *a, int b) { return; }
static const struct super_operations
sops __attribute__ ((unused)) = {
.dirty_inode = dirty_inode,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_DIRTY_INODE], [
AC_MSG_CHECKING([whether sops->dirty_inode() wants flags])
ZFS_LINUX_TEST_RESULT([dirty_inode_with_flags], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_DIRTY_INODE_WITH_FLAGS, 1,
[sops->dirty_inode() wants flags])
],[
AC_MSG_RESULT([no])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-encode-fh-inode.m4 b/sys/contrib/openzfs/config/kernel-encode-fh-inode.m4
index 9d4ba5f0f61f..b3ec040b5e95 100644
--- a/sys/contrib/openzfs/config/kernel-encode-fh-inode.m4
+++ b/sys/contrib/openzfs/config/kernel-encode-fh-inode.m4
@@ -1,27 +1,27 @@
dnl #
dnl # 3.5.0 API change
dnl # torvalds/linux@b0b0382bb4904965a9e9fca77ad87514dfda0d1c changed the
dnl # ->encode_fh() callback to pass the child inode and its parent's inode
dnl # rather than a dentry and a boolean saying whether we want the parent.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE], [
ZFS_LINUX_TEST_SRC([export_operations_encode_fh], [
#include <linux/exportfs.h>
- int encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+ static int encode_fh(struct inode *inode, __u32 *fh, int *max_len,
struct inode *parent) { return 0; }
static struct export_operations eops __attribute__ ((unused))={
.encode_fh = encode_fh,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE], [
AC_MSG_CHECKING([whether eops->encode_fh() wants inode])
ZFS_LINUX_TEST_RESULT([export_operations_encode_fh], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ENCODE_FH_WITH_INODE, 1,
[eops->encode_fh() wants child and parent inodes])
],[
AC_MSG_RESULT(no)
])
])
diff --git a/sys/contrib/openzfs/config/kernel-evict-inode.m4 b/sys/contrib/openzfs/config/kernel-evict-inode.m4
index 66f10492de54..87082c9a2839 100644
--- a/sys/contrib/openzfs/config/kernel-evict-inode.m4
+++ b/sys/contrib/openzfs/config/kernel-evict-inode.m4
@@ -1,24 +1,24 @@
dnl #
dnl # 2.6.36 API change
dnl # The sops->delete_inode() and sops->clear_inode() callbacks have
dnl # been replaced by a single sops->evict_inode() callback.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_EVICT_INODE], [
ZFS_LINUX_TEST_SRC([evict_inode], [
#include <linux/fs.h>
- void evict_inode (struct inode * t) { return; }
+ static void evict_inode (struct inode * t) { return; }
static struct super_operations sops __attribute__ ((unused)) = {
.evict_inode = evict_inode,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_EVICT_INODE], [
AC_MSG_CHECKING([whether sops->evict_inode() exists])
ZFS_LINUX_TEST_RESULT([evict_inode], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_EVICT_INODE, 1, [sops->evict_inode() exists])
],[
ZFS_LINUX_TEST_ERROR([evict_inode])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-fallocate.m4 b/sys/contrib/openzfs/config/kernel-fallocate.m4
index 815602d3e2c6..95186dada453 100644
--- a/sys/contrib/openzfs/config/kernel-fallocate.m4
+++ b/sys/contrib/openzfs/config/kernel-fallocate.m4
@@ -1,44 +1,44 @@
dnl #
dnl # Linux 2.6.38 - 3.x API
dnl # The fallocate callback was moved from the inode_operations
dnl # structure to the file_operations structure.
dnl #
dnl #
dnl # Linux 3.15+
dnl # fallocate learned a new flag, FALLOC_FL_ZERO_RANGE
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_FALLOCATE], [
ZFS_LINUX_TEST_SRC([file_fallocate], [
#include <linux/fs.h>
- long test_fallocate(struct file *file, int mode,
+ static long test_fallocate(struct file *file, int mode,
loff_t offset, loff_t len) { return 0; }
static const struct file_operations
fops __attribute__ ((unused)) = {
.fallocate = test_fallocate,
};
], [])
ZFS_LINUX_TEST_SRC([falloc_fl_zero_range], [
#include <linux/falloc.h>
],[
int flags __attribute__ ((unused));
flags = FALLOC_FL_ZERO_RANGE;
])
])
AC_DEFUN([ZFS_AC_KERNEL_FALLOCATE], [
AC_MSG_CHECKING([whether fops->fallocate() exists])
ZFS_LINUX_TEST_RESULT([file_fallocate], [
AC_MSG_RESULT(yes)
AC_MSG_CHECKING([whether FALLOC_FL_ZERO_RANGE exists])
ZFS_LINUX_TEST_RESULT([falloc_fl_zero_range], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FALLOC_FL_ZERO_RANGE, 1, [FALLOC_FL_ZERO_RANGE is defined])
],[
AC_MSG_RESULT(no)
])
],[
ZFS_LINUX_TEST_ERROR([file_fallocate])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-flush_dcache_page.m4 b/sys/contrib/openzfs/config/kernel-flush_dcache_page.m4
index 2340c386ef57..aa916c87d531 100644
--- a/sys/contrib/openzfs/config/kernel-flush_dcache_page.m4
+++ b/sys/contrib/openzfs/config/kernel-flush_dcache_page.m4
@@ -1,26 +1,27 @@
dnl #
dnl # Starting from Linux 5.13, flush_dcache_page() becomes an inline
-dnl # function and may indirectly referencing GPL-only cpu_feature_keys on
-dnl # powerpc
+dnl # function and may indirectly reference GPL-only symbols:
+dnl # on powerpc: cpu_feature_keys
+dnl # on riscv: PageHuge (added in 6.2)
dnl #
dnl #
dnl # Checking if flush_dcache_page is exported GPL-only
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_FLUSH_DCACHE_PAGE], [
ZFS_LINUX_TEST_SRC([flush_dcache_page], [
#include <asm/cacheflush.h>
], [
flush_dcache_page(0);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_FLUSH_DCACHE_PAGE], [
AC_MSG_CHECKING([whether flush_dcache_page() is GPL-only])
ZFS_LINUX_TEST_RESULT([flush_dcache_page_license], [
AC_MSG_RESULT(no)
], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY, 1,
[flush_dcache_page() is GPL-only])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-fpu.m4 b/sys/contrib/openzfs/config/kernel-fpu.m4
index c6efebd8cf61..edfde1a02d30 100644
--- a/sys/contrib/openzfs/config/kernel-fpu.m4
+++ b/sys/contrib/openzfs/config/kernel-fpu.m4
@@ -1,113 +1,130 @@
dnl #
dnl # Handle differences in kernel FPU code.
dnl #
dnl # Kernel
dnl # 5.19: The asm/fpu/internal.h header was removed; it has been
dnl # effectively empty since the 5.16 kernel.
dnl #
dnl # 5.11: kernel_fpu_begin() is an inlined function now, so don't check
dnl # for it inside the kernel symbols.
dnl #
dnl # 5.0: Wrappers have been introduced to save/restore the FPU state.
dnl # This change was made to the 4.19.38 and 4.14.120 LTS kernels.
dnl # HAVE_KERNEL_FPU_INTERNAL
dnl #
dnl # 4.2: Use __kernel_fpu_{begin,end}()
dnl # HAVE_UNDERSCORE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # Pre-4.2: Use kernel_fpu_{begin,end}()
dnl # HAVE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # N.B. The header check is performed before all other checks since it
dnl # depends on HAVE_KERNEL_FPU_API_HEADER being set in confdefs.h.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_FPU_HEADER], [
AC_MSG_CHECKING([whether fpu headers are available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <asm/fpu/api.h>
],[
],[
AC_DEFINE(HAVE_KERNEL_FPU_API_HEADER, 1,
[kernel has asm/fpu/api.h])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <asm/fpu/internal.h>
],[
],[
AC_DEFINE(HAVE_KERNEL_FPU_INTERNAL_HEADER, 1,
[kernel has asm/fpu/internal.h])
AC_MSG_RESULT([asm/fpu/api.h asm/fpu/internal.h])
],[
AC_MSG_RESULT([asm/fpu/api.h])
])
],[
AC_MSG_RESULT([i387.h])
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
ZFS_LINUX_TEST_SRC([kernel_fpu], [
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#ifdef HAVE_KERNEL_FPU_INTERNAL_HEADER
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif
], [
kernel_fpu_begin();
kernel_fpu_end();
], [], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([__kernel_fpu], [
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#ifdef HAVE_KERNEL_FPU_INTERNAL_HEADER
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif
], [
__kernel_fpu_begin();
__kernel_fpu_end();
], [], [ZFS_META_LICENSE])
+ ZFS_LINUX_TEST_SRC([kernel_neon], [
+ #include <asm/neon.h>
+ ], [
+ kernel_neon_begin();
+ kernel_neon_end();
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_FPU], [
dnl #
dnl # Legacy kernel
dnl #
AC_MSG_CHECKING([whether kernel fpu is available])
ZFS_LINUX_TEST_RESULT([kernel_fpu_license], [
AC_MSG_RESULT(kernel_fpu_*)
AC_DEFINE(HAVE_KERNEL_FPU, 1,
[kernel has kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1,
[kernel exports FPU functions])
],[
dnl #
dnl # Linux 4.2 kernel
dnl #
ZFS_LINUX_TEST_RESULT_SYMBOL([__kernel_fpu_license],
[__kernel_fpu_begin],
[arch/x86/kernel/fpu/core.c arch/x86/kernel/i387.c], [
AC_MSG_RESULT(__kernel_fpu_*)
AC_DEFINE(HAVE_UNDERSCORE_KERNEL_FPU, 1,
[kernel has __kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1,
[kernel exports FPU functions])
],[
- AC_MSG_RESULT(internal)
- AC_DEFINE(HAVE_KERNEL_FPU_INTERNAL, 1,
- [kernel fpu internal])
+ dnl #
+ dnl # ARM neon symbols (only on arm and arm64)
+ dnl # could be GPL-only on arm64 after Linux 6.2
+ dnl #
+ ZFS_LINUX_TEST_RESULT([kernel_neon_license],[
+ AC_MSG_RESULT(kernel_neon_*)
+ AC_DEFINE(HAVE_KERNEL_NEON, 1,
+ [kernel has kernel_neon_* functions])
+ ],[
+ # catch-all
+ AC_MSG_RESULT(internal)
+ AC_DEFINE(HAVE_KERNEL_FPU_INTERNAL, 1,
+ [kernel fpu internal])
+ ])
])
])
])
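dnl #
dnl # Sketch of how SIMD code might bracket a vector region using the
dnl # results above; simd_begin() is a hypothetical name, and the
dnl # HAVE_KERNEL_FPU_INTERNAL case (manual state save) is elided.
dnl #
dnl #     static inline void
dnl #     simd_begin(void)
dnl #     {
dnl #     #if defined(HAVE_KERNEL_NEON)
dnl #             kernel_neon_begin();
dnl #     #elif defined(HAVE_UNDERSCORE_KERNEL_FPU)
dnl #             __kernel_fpu_begin();
dnl #     #elif defined(HAVE_KERNEL_FPU)
dnl #             kernel_fpu_begin();
dnl #     #endif
dnl #     }
dnl #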
diff --git a/sys/contrib/openzfs/config/kernel-fsync.m4 b/sys/contrib/openzfs/config/kernel-fsync.m4
index d198191d3ab9..c155f8af81a8 100644
--- a/sys/contrib/openzfs/config/kernel-fsync.m4
+++ b/sys/contrib/openzfs/config/kernel-fsync.m4
@@ -1,53 +1,53 @@
dnl #
dnl # Check file_operations->fsync interface.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_FSYNC], [
ZFS_LINUX_TEST_SRC([fsync_without_dentry], [
#include <linux/fs.h>
- int test_fsync(struct file *f, int x) { return 0; }
+ static int test_fsync(struct file *f, int x) { return 0; }
static const struct file_operations
fops __attribute__ ((unused)) = {
.fsync = test_fsync,
};
],[])
ZFS_LINUX_TEST_SRC([fsync_range], [
#include <linux/fs.h>
- int test_fsync(struct file *f, loff_t a, loff_t b, int c)
+ static int test_fsync(struct file *f, loff_t a, loff_t b, int c)
{ return 0; }
static const struct file_operations
fops __attribute__ ((unused)) = {
.fsync = test_fsync,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_FSYNC], [
dnl #
dnl # Linux 2.6.35 - Linux 3.0 API
dnl #
AC_MSG_CHECKING([whether fops->fsync() wants no dentry])
ZFS_LINUX_TEST_RESULT([fsync_without_dentry], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_FSYNC_WITHOUT_DENTRY, 1,
[fops->fsync() without dentry])
],[
AC_MSG_RESULT([no])
dnl #
dnl # Linux 3.1 - 3.x API
dnl #
AC_MSG_CHECKING([whether fops->fsync() wants range])
ZFS_LINUX_TEST_RESULT([fsync_range], [
AC_MSG_RESULT([range])
AC_DEFINE(HAVE_FSYNC_RANGE, 1,
[fops->fsync() with range])
],[
ZFS_LINUX_TEST_ERROR([fops->fsync])
])
])
])
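dnl #
dnl # Sketch only: the filesystem's fsync handler adopts whichever
dnl # signature matched; my_fsync() is a hypothetical name.
dnl #
dnl #     #ifdef HAVE_FSYNC_RANGE
dnl #     static int my_fsync(struct file *filp, loff_t start, loff_t end,
dnl #         int datasync)
dnl #     #else   /* HAVE_FSYNC_WITHOUT_DENTRY */
dnl #     static int my_fsync(struct file *filp, int datasync)
dnl #     #endif
dnl #     { return (0); }
dnl #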
diff --git a/sys/contrib/openzfs/config/kernel-get-link.m4 b/sys/contrib/openzfs/config/kernel-get-link.m4
index e4f478e37c18..1f8f5b0c8b72 100644
--- a/sys/contrib/openzfs/config/kernel-get-link.m4
+++ b/sys/contrib/openzfs/config/kernel-get-link.m4
@@ -1,104 +1,104 @@
dnl #
dnl # Supported get_link() interfaces checked newest to oldest.
dnl # Note this interface used to be named follow_link.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GET_LINK], [
ZFS_LINUX_TEST_SRC([inode_operations_get_link], [
#include <linux/fs.h>
- const char *get_link(struct dentry *de, struct inode *ip,
+ static const char *get_link(struct dentry *de, struct inode *ip,
struct delayed_call *done) { return "symlink"; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.get_link = get_link,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_get_link_cookie], [
#include <linux/fs.h>
- const char *get_link(struct dentry *de, struct
+ static const char *get_link(struct dentry *de, struct
inode *ip, void **cookie) { return "symlink"; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.get_link = get_link,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_follow_link], [
#include <linux/fs.h>
- const char *follow_link(struct dentry *de,
+ static const char *follow_link(struct dentry *de,
void **cookie) { return "symlink"; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.follow_link = follow_link,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_follow_link_nameidata], [
#include <linux/fs.h>
- void *follow_link(struct dentry *de, struct
+ static void *follow_link(struct dentry *de, struct
nameidata *nd) { return (void *)NULL; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.follow_link = follow_link,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_GET_LINK], [
dnl #
dnl # 4.5 API change
dnl # The get_link interface has added a delayed done call and
dnl # used it to retire the put_link() interface.
dnl #
AC_MSG_CHECKING([whether iops->get_link() passes delayed])
ZFS_LINUX_TEST_RESULT([inode_operations_get_link], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_LINK_DELAYED, 1, [iops->get_link() delayed])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 4.5 API change
dnl # The follow_link() interface has been replaced by
dnl # get_link() which behaves the same as before except:
dnl # - An inode is passed as a separate argument
dnl # - When called in RCU mode a NULL dentry is passed.
dnl #
AC_MSG_CHECKING([whether iops->get_link() passes cookie])
ZFS_LINUX_TEST_RESULT([inode_operations_get_link_cookie], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_LINK_COOKIE, 1,
[iops->get_link() cookie])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 4.2 API change
dnl # This kernel retired the nameidata structure.
dnl #
AC_MSG_CHECKING(
[whether iops->follow_link() passes cookie])
ZFS_LINUX_TEST_RESULT([inode_operations_follow_link], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FOLLOW_LINK_COOKIE, 1,
[iops->follow_link() cookie])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 2.6.32 API
dnl #
AC_MSG_CHECKING(
[whether iops->follow_link() passes nameidata])
ZFS_LINUX_TEST_RESULT(
[inode_operations_follow_link_nameidata],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FOLLOW_LINK_NAMEIDATA, 1,
[iops->follow_link() nameidata])
],[
ZFS_LINUX_TEST_ERROR([get_link])
])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-idmap_mnt_api.m4 b/sys/contrib/openzfs/config/kernel-idmap_mnt_api.m4
index 47ddc5702fb7..d1bdd053203e 100644
--- a/sys/contrib/openzfs/config/kernel-idmap_mnt_api.m4
+++ b/sys/contrib/openzfs/config/kernel-idmap_mnt_api.m4
@@ -1,25 +1,50 @@
dnl #
dnl # 5.12 API
dnl #
dnl # Check if APIs for idmapped mount are available
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_IDMAP_MNT_API], [
ZFS_LINUX_TEST_SRC([idmap_mnt_api], [
#include <linux/fs.h>
],[
int fs_flags = 0;
fs_flags |= FS_ALLOW_IDMAP;
])
])
AC_DEFUN([ZFS_AC_KERNEL_IDMAP_MNT_API], [
AC_MSG_CHECKING([whether APIs for idmapped mount are present])
ZFS_LINUX_TEST_RESULT([idmap_mnt_api], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_IDMAP_MNT_API, 1,
[APIs for idmapped mount are present])
],[
AC_MSG_RESULT([no])
])
])
+dnl #
+dnl # 6.8 decouples mnt_idmap from user_namespace. This is all internal
+dnl # to mnt_idmap so we can't detect it directly, but we detect a related
+dnl # change and use that as a signal.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_IDMAP_NO_USERNS], [
+ ZFS_LINUX_TEST_SRC([idmap_no_userns], [
+ #include <linux/uidgid.h>
+ ], [
+ struct uid_gid_map *map = NULL;
+ map_id_down(map, 0);
+ ])
+])
+
+
+AC_DEFUN([ZFS_AC_KERNEL_IDMAP_NO_USERNS], [
+ AC_MSG_CHECKING([whether idmapped mounts have a user namespace])
+ ZFS_LINUX_TEST_RESULT([idmap_no_userns], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_IDMAP_NO_USERNS, 1,
+ [mnt_idmap does not have user_namespace])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
diff --git a/sys/contrib/openzfs/config/kernel-inode-create.m4 b/sys/contrib/openzfs/config/kernel-inode-create.m4
index 9e9e43180976..95f8aa2d5220 100644
--- a/sys/contrib/openzfs/config/kernel-inode-create.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-create.m4
@@ -1,80 +1,80 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_CREATE], [
dnl #
dnl # 6.3 API change
dnl # The first arg is changed to struct mnt_idmap *
dnl #
ZFS_LINUX_TEST_SRC([create_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
- int inode_create(struct mnt_idmap *idmap,
+ static int inode_create(struct mnt_idmap *idmap,
struct inode *inode ,struct dentry *dentry,
umode_t umode, bool flag) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.create = inode_create,
};
],[])
dnl #
dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list.
dnl #
ZFS_LINUX_TEST_SRC([create_userns], [
#include <linux/fs.h>
#include <linux/sched.h>
- int inode_create(struct user_namespace *userns,
+ static int inode_create(struct user_namespace *userns,
struct inode *inode ,struct dentry *dentry,
umode_t umode, bool flag) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.create = inode_create,
};
],[])
dnl #
dnl # 3.6 API change
dnl #
ZFS_LINUX_TEST_SRC([create_flags], [
#include <linux/fs.h>
#include <linux/sched.h>
- int inode_create(struct inode *inode ,struct dentry *dentry,
+ static int inode_create(struct inode *inode ,struct dentry *dentry,
umode_t umode, bool flag) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.create = inode_create,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_CREATE], [
AC_MSG_CHECKING([whether iops->create() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([create_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_CREATE_IDMAP, 1,
[iops->create() takes struct mnt_idmap*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->create() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([create_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_CREATE_USERNS, 1,
[iops->create() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->create() passes flags])
ZFS_LINUX_TEST_RESULT([create_flags], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([iops->create()])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-inode-getattr.m4 b/sys/contrib/openzfs/config/kernel-inode-getattr.m4
index c8bfb07862ab..5f7ce1ad9a5d 100644
--- a/sys/contrib/openzfs/config/kernel-inode-getattr.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-getattr.m4
@@ -1,123 +1,123 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_GETATTR], [
dnl #
dnl # Linux 6.3 API
dnl # The first arg of getattr I/O operations handler type
dnl # is changed to struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_getattr_mnt_idmap], [
#include <linux/fs.h>
- int test_getattr(
+ static int test_getattr(
struct mnt_idmap *idmap,
const struct path *p, struct kstat *k,
u32 request_mask, unsigned int query_flags)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.getattr = test_getattr,
};
],[])
dnl #
dnl # Linux 5.12 API
dnl # The getattr I/O operations handler type was extended to require
dnl # a struct user_namespace* as its first arg, to support idmapped
dnl # mounts.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_getattr_userns], [
#include <linux/fs.h>
- int test_getattr(
+ static int test_getattr(
struct user_namespace *userns,
const struct path *p, struct kstat *k,
u32 request_mask, unsigned int query_flags)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.getattr = test_getattr,
};
],[])
dnl #
dnl # Linux 4.11 API
dnl # See torvalds/linux@a528d35
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_getattr_path], [
#include <linux/fs.h>
- int test_getattr(
+ static int test_getattr(
const struct path *p, struct kstat *k,
u32 request_mask, unsigned int query_flags)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.getattr = test_getattr,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_getattr_vfsmount], [
#include <linux/fs.h>
- int test_getattr(
+ static int test_getattr(
struct vfsmount *mnt, struct dentry *d,
struct kstat *k)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.getattr = test_getattr,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_GETATTR], [
dnl #
dnl # Kernel 6.3 test
dnl #
AC_MSG_CHECKING([whether iops->getattr() takes mnt_idmap])
ZFS_LINUX_TEST_RESULT([inode_operations_getattr_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IDMAP_IOPS_GETATTR, 1,
[iops->getattr() takes struct mnt_idmap*])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Kernel 5.12 test
dnl #
AC_MSG_CHECKING([whether iops->getattr() takes user_namespace])
ZFS_LINUX_TEST_RESULT([inode_operations_getattr_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_USERNS_IOPS_GETATTR, 1,
[iops->getattr() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Kernel 4.11 test
dnl #
AC_MSG_CHECKING([whether iops->getattr() takes a path])
ZFS_LINUX_TEST_RESULT([inode_operations_getattr_path], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PATH_IOPS_GETATTR, 1,
[iops->getattr() takes a path])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Kernel < 4.11 test
dnl #
AC_MSG_CHECKING([whether iops->getattr() takes a vfsmount])
ZFS_LINUX_TEST_RESULT([inode_operations_getattr_vfsmount], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VFSMOUNT_IOPS_GETATTR, 1,
[iops->getattr() takes a vfsmount])
],[
AC_MSG_RESULT(no)
])
])
])
])
])
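The three HAVE_*_IOPS_GETATTR results above are typically consumed by selecting a handler prototype at build time and funneling every variant into one implementation. A minimal sketch, with hypothetical names (my_getattr is not a symbol from this tree); the same selection pattern applies to the create, setattr, permission, mkdir, mknod, symlink, rename and tmpfile checks in this series:

    #include <linux/fs.h>

    static int
    my_getattr_impl(const struct path *path, struct kstat *stat,
        u32 request_mask, unsigned int query_flags)
    {
        /* common getattr logic, shared by every prototype below */
        return (0);
    }

    #if defined(HAVE_IDMAP_IOPS_GETATTR)
    static int
    my_getattr(struct mnt_idmap *idmap, const struct path *path,
        struct kstat *stat, u32 request_mask, unsigned int query_flags)
    {
        return (my_getattr_impl(path, stat, request_mask, query_flags));
    }
    #elif defined(HAVE_USERNS_IOPS_GETATTR)
    static int
    my_getattr(struct user_namespace *userns, const struct path *path,
        struct kstat *stat, u32 request_mask, unsigned int query_flags)
    {
        return (my_getattr_impl(path, stat, request_mask, query_flags));
    }
    #else /* HAVE_PATH_IOPS_GETATTR */
    static int
    my_getattr(const struct path *path, struct kstat *stat,
        u32 request_mask, unsigned int query_flags)
    {
        return (my_getattr_impl(path, stat, request_mask, query_flags));
    }
    #endif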
diff --git a/sys/contrib/openzfs/config/kernel-inode-lookup.m4 b/sys/contrib/openzfs/config/kernel-inode-lookup.m4
index 1a56e69b04aa..c7373056422c 100644
--- a/sys/contrib/openzfs/config/kernel-inode-lookup.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-lookup.m4
@@ -1,26 +1,26 @@
dnl #
dnl # 3.6 API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS], [
ZFS_LINUX_TEST_SRC([lookup_flags], [
#include <linux/fs.h>
#include <linux/sched.h>
- struct dentry *inode_lookup(struct inode *inode,
+ static struct dentry *inode_lookup(struct inode *inode,
struct dentry *dentry, unsigned int flags) { return NULL; }
static const struct inode_operations iops
__attribute__ ((unused)) = {
.lookup = inode_lookup,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_LOOKUP_FLAGS], [
AC_MSG_CHECKING([whether iops->lookup() passes flags])
ZFS_LINUX_TEST_RESULT([lookup_flags], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([iops->lookup()])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-inode-permission.m4 b/sys/contrib/openzfs/config/kernel-inode-permission.m4
index 01d23635b0c9..f7fc16439093 100644
--- a/sys/contrib/openzfs/config/kernel-inode-permission.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-permission.m4
@@ -1,54 +1,54 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_PERMISSION], [
dnl #
dnl # 6.3 API change
dnl # iops->permission() now takes struct mnt_idmap*
dnl # as its first arg
dnl #
ZFS_LINUX_TEST_SRC([permission_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
- int inode_permission(struct mnt_idmap *idmap,
+ static int test_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
- .permission = inode_permission,
+ .permission = test_permission,
};
],[])
dnl #
dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list.
dnl #
ZFS_LINUX_TEST_SRC([permission_userns], [
#include <linux/fs.h>
#include <linux/sched.h>
- int inode_permission(struct user_namespace *userns,
+ static int test_permission(struct user_namespace *userns,
struct inode *inode, int mask) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
- .permission = inode_permission,
+ .permission = test_permission,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_PERMISSION], [
AC_MSG_CHECKING([whether iops->permission() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([permission_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_PERMISSION_IDMAP, 1,
[iops->permission() takes struct mnt_idmap*])
],[
AC_MSG_CHECKING([whether iops->permission() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([permission_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_PERMISSION_USERNS, 1,
[iops->permission() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-inode-setattr.m4 b/sys/contrib/openzfs/config/kernel-inode-setattr.m4
index 45755b4eb273..69289e897be6 100644
--- a/sys/contrib/openzfs/config/kernel-inode-setattr.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-setattr.m4
@@ -1,87 +1,87 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_SETATTR], [
dnl #
dnl # Linux 6.3 API
dnl # The first arg of setattr I/O operations handler type
dnl # is changed to struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_setattr_mnt_idmap], [
#include <linux/fs.h>
- int test_setattr(
+ static int test_setattr(
struct mnt_idmap *idmap,
struct dentry *de, struct iattr *ia)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.setattr = test_setattr,
};
],[])
dnl #
dnl # Linux 5.12 API
dnl # The setattr I/O operations handler type was extended to require
dnl # a struct user_namespace* as its first arg, to support idmapped
dnl # mounts.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_setattr_userns], [
#include <linux/fs.h>
- int test_setattr(
+ static int test_setattr(
struct user_namespace *userns,
struct dentry *de, struct iattr *ia)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.setattr = test_setattr,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_setattr], [
#include <linux/fs.h>
- int test_setattr(
+ static int test_setattr(
struct dentry *de, struct iattr *ia)
{ return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.setattr = test_setattr,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_SETATTR], [
dnl #
dnl # Kernel 6.3 test
dnl #
AC_MSG_CHECKING([whether iops->setattr() takes mnt_idmap])
ZFS_LINUX_TEST_RESULT([inode_operations_setattr_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IDMAP_IOPS_SETATTR, 1,
[iops->setattr() takes struct mnt_idmap*])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Kernel 5.12 test
dnl #
AC_MSG_CHECKING([whether iops->setattr() takes user_namespace])
ZFS_LINUX_TEST_RESULT([inode_operations_setattr_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_USERNS_IOPS_SETATTR, 1,
[iops->setattr() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->setattr() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_setattr], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_SETATTR, 1,
[iops->setattr() exists])
],[
AC_MSG_RESULT(no)
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-inode-times.m4 b/sys/contrib/openzfs/config/kernel-inode-times.m4
index aae95abf1720..4d861596ed0b 100644
--- a/sys/contrib/openzfs/config/kernel-inode-times.m4
+++ b/sys/contrib/openzfs/config/kernel-inode-times.m4
@@ -1,93 +1,171 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_TIMES], [
dnl #
dnl # 5.6 API change
dnl # timespec64_trunc() replaced by timestamp_truncate() interface.
dnl #
ZFS_LINUX_TEST_SRC([timestamp_truncate], [
#include <linux/fs.h>
],[
struct timespec64 ts;
struct inode ip;
memset(&ts, 0, sizeof(ts));
ts = timestamp_truncate(ts, &ip);
])
dnl #
dnl # 4.18 API change
dnl # i_atime, i_mtime, and i_ctime changed from timespec to timespec64.
dnl #
ZFS_LINUX_TEST_SRC([inode_times], [
#include <linux/fs.h>
],[
struct inode ip;
struct timespec ts;
memset(&ip, 0, sizeof(ip));
ts = ip.i_mtime;
])
dnl #
dnl # 6.6 API change
dnl # i_ctime no longer directly accessible, must use
dnl # inode_get_ctime(ip), inode_set_ctime*(ip) to
dnl # read/write.
dnl #
ZFS_LINUX_TEST_SRC([inode_get_ctime], [
#include <linux/fs.h>
],[
struct inode ip;
memset(&ip, 0, sizeof(ip));
inode_get_ctime(&ip);
])
ZFS_LINUX_TEST_SRC([inode_set_ctime_to_ts], [
#include <linux/fs.h>
],[
struct inode ip;
struct timespec64 ts = {0};
memset(&ip, 0, sizeof(ip));
inode_set_ctime_to_ts(&ip, ts);
])
+
+ dnl #
+ dnl # 6.7 API change
+ dnl # i_atime/i_mtime no longer directly accessible, must use
+ dnl # inode_get_atime(ip)/inode_get_mtime(ip) and
+ dnl # inode_set_atime*(ip)/inode_set_mtime*(ip) to read/write.
+ dnl #
+ ZFS_LINUX_TEST_SRC([inode_get_atime], [
+ #include <linux/fs.h>
+ ],[
+ struct inode ip;
+
+ memset(&ip, 0, sizeof(ip));
+ inode_get_atime(&ip);
+ ])
+ ZFS_LINUX_TEST_SRC([inode_get_mtime], [
+ #include <linux/fs.h>
+ ],[
+ struct inode ip;
+
+ memset(&ip, 0, sizeof(ip));
+ inode_get_mtime(&ip);
+ ])
+
+ ZFS_LINUX_TEST_SRC([inode_set_atime_to_ts], [
+ #include <linux/fs.h>
+ ],[
+ struct inode ip;
+ struct timespec64 ts = {0};
+
+ memset(&ip, 0, sizeof(ip));
+ inode_set_atime_to_ts(&ip, ts);
+ ])
+ ZFS_LINUX_TEST_SRC([inode_set_mtime_to_ts], [
+ #include <linux/fs.h>
+ ],[
+ struct inode ip;
+ struct timespec64 ts = {0};
+
+ memset(&ip, 0, sizeof(ip));
+ inode_set_mtime_to_ts(&ip, ts);
+ ])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_TIMES], [
AC_MSG_CHECKING([whether timestamp_truncate() exists])
ZFS_LINUX_TEST_RESULT([timestamp_truncate], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_INODE_TIMESTAMP_TRUNCATE, 1,
[timestamp_truncate() exists])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether inode->i_*time's are timespec64])
ZFS_LINUX_TEST_RESULT([inode_times], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_INODE_TIMESPEC64_TIMES, 1,
[inode->i_*time's are timespec64])
])
AC_MSG_CHECKING([whether inode_get_ctime() exists])
ZFS_LINUX_TEST_RESULT([inode_get_ctime], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_INODE_GET_CTIME, 1,
[inode_get_ctime() exists in linux/fs.h])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether inode_set_ctime_to_ts() exists])
ZFS_LINUX_TEST_RESULT([inode_set_ctime_to_ts], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_INODE_SET_CTIME_TO_TS, 1,
[inode_set_ctime_to_ts() exists in linux/fs.h])
],[
AC_MSG_RESULT(no)
])
+
+ AC_MSG_CHECKING([whether inode_get_atime() exists])
+ ZFS_LINUX_TEST_RESULT([inode_get_atime], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_INODE_GET_ATIME, 1,
+ [inode_get_atime() exists in linux/fs.h])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([whether inode_set_atime_to_ts() exists])
+ ZFS_LINUX_TEST_RESULT([inode_set_atime_to_ts], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_INODE_SET_ATIME_TO_TS, 1,
+ [inode_set_atime_to_ts() exists in linux/fs.h])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([whether inode_get_mtime() exists])
+ ZFS_LINUX_TEST_RESULT([inode_get_mtime], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_INODE_GET_MTIME, 1,
+ [inode_get_mtime() exists in linux/fs.h])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([whether inode_set_mtime_to_ts() exists])
+ ZFS_LINUX_TEST_RESULT([inode_set_mtime_to_ts], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_INODE_SET_MTIME_TO_TS, 1,
+ [inode_set_mtime_to_ts() exists in linux/fs.h])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
])
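The four new accessor checks pair naturally with thin compat macros so the rest of the code can use a single spelling on every kernel. A sketch, assuming the zpl_-prefixed wrapper names are purely illustrative:

    #include <linux/fs.h>

    #ifdef HAVE_INODE_GET_MTIME
    #define zpl_inode_get_mtime(ip)          inode_get_mtime(ip)
    #else
    #define zpl_inode_get_mtime(ip)          ((ip)->i_mtime)
    #endif

    #ifdef HAVE_INODE_SET_MTIME_TO_TS
    #define zpl_inode_set_mtime_to_ts(ip, ts) inode_set_mtime_to_ts(ip, ts)
    #else
    #define zpl_inode_set_mtime_to_ts(ip, ts) ((ip)->i_mtime = (ts))
    #endif

The atime pair follows identically from HAVE_INODE_GET_ATIME and HAVE_INODE_SET_ATIME_TO_TS.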
diff --git a/sys/contrib/openzfs/config/kernel-make-request-fn.m4 b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
index f17416acca67..4d20dd45c4a1 100644
--- a/sys/contrib/openzfs/config/kernel-make-request-fn.m4
+++ b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
@@ -1,180 +1,180 @@
dnl #
dnl # Check for make_request_fn interface.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN], [
ZFS_LINUX_TEST_SRC([make_request_fn_void], [
#include <linux/blkdev.h>
- void make_request(struct request_queue *q,
+ static void make_request(struct request_queue *q,
struct bio *bio) { return; }
],[
blk_queue_make_request(NULL, &make_request);
])
ZFS_LINUX_TEST_SRC([make_request_fn_blk_qc_t], [
#include <linux/blkdev.h>
- blk_qc_t make_request(struct request_queue *q,
+ static blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
blk_queue_make_request(NULL, &make_request);
])
ZFS_LINUX_TEST_SRC([blk_alloc_queue_request_fn], [
#include <linux/blkdev.h>
- blk_qc_t make_request(struct request_queue *q,
+ static blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
struct request_queue *q __attribute__ ((unused));
q = blk_alloc_queue(make_request, NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([blk_alloc_queue_request_fn_rh], [
#include <linux/blkdev.h>
- blk_qc_t make_request(struct request_queue *q,
+ static blk_qc_t make_request(struct request_queue *q,
struct bio *bio) { return (BLK_QC_T_NONE); }
],[
struct request_queue *q __attribute__ ((unused));
q = blk_alloc_queue_rh(make_request, NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([block_device_operations_submit_bio], [
#include <linux/blkdev.h>
],[
struct block_device_operations o;
o.submit_bio = NULL;
])
ZFS_LINUX_TEST_SRC([blk_alloc_disk], [
#include <linux/blkdev.h>
],[
struct gendisk *disk __attribute__ ((unused));
disk = blk_alloc_disk(NUMA_NO_NODE);
])
ZFS_LINUX_TEST_SRC([blk_cleanup_disk], [
#include <linux/blkdev.h>
],[
struct gendisk *disk __attribute__ ((unused));
blk_cleanup_disk(disk);
])
])
AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.9 API Change
dnl # make_request_fn was moved into block_device_operations->submit_bio
dnl #
AC_MSG_CHECKING([whether submit_bio is member of struct block_device_operations])
ZFS_LINUX_TEST_RESULT([block_device_operations_submit_bio], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS, 1,
[submit_bio is member of struct block_device_operations])
dnl #
dnl # Linux 5.14 API Change:
dnl # blk_alloc_queue() + alloc_disk() combo replaced by
dnl # a single call to blk_alloc_disk().
dnl #
AC_MSG_CHECKING([whether blk_alloc_disk() exists])
ZFS_LINUX_TEST_RESULT([blk_alloc_disk], [
AC_MSG_RESULT(yes)
AC_DEFINE([HAVE_BLK_ALLOC_DISK], 1, [blk_alloc_disk() exists])
dnl #
dnl # 5.20 API change,
dnl # Removed blk_cleanup_disk(), put_disk() should be used.
dnl #
AC_MSG_CHECKING([whether blk_cleanup_disk() exists])
ZFS_LINUX_TEST_RESULT([blk_cleanup_disk], [
AC_MSG_RESULT(yes)
AC_DEFINE([HAVE_BLK_CLEANUP_DISK], 1,
[blk_cleanup_disk() exists])
], [
AC_MSG_RESULT(no)
])
], [
AC_MSG_RESULT(no)
])
],[
AC_MSG_RESULT(no)
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.7 API Change
dnl # blk_alloc_queue() expects request function.
dnl #
AC_MSG_CHECKING([whether blk_alloc_queue() expects request function])
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn], [
AC_MSG_RESULT(yes)
dnl # This is currently always the case.
AC_MSG_CHECKING([whether make_request_fn() returns blk_qc_t])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN, 1,
[blk_alloc_queue() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
],[
dnl #
dnl # CentOS Stream 4.18.0-257 API Change
dnl # The Linux 5.7 blk_alloc_queue() change was back-
dnl # ported and the symbol renamed blk_alloc_queue_rh().
dnl # As of this kernel version they're not providing
dnl # any compatibility code in the kernel for this.
dnl #
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn_rh], [
AC_MSG_RESULT(yes)
dnl # This is currently always the case.
AC_MSG_CHECKING([whether make_request_fn_rh() returns blk_qc_t])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH, 1,
[blk_alloc_queue_rh() expects request function])
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() returns blk_qc_t])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 3.2 API Change
dnl # make_request_fn returns void.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns void])
ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, void,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
[Noting that make_request_fn() returns void])
],[
AC_MSG_RESULT(no)
dnl #
dnl # Linux 4.4 API Change
dnl # make_request_fn returns blk_qc_t.
dnl #
AC_MSG_CHECKING(
[whether make_request_fn() returns blk_qc_t])
ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
AC_MSG_RESULT(yes)
AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
[Noting that make_request_fn() ]
[returns blk_qc_t])
],[
ZFS_LINUX_TEST_ERROR([make_request_fn])
])
])
])
])
])
])
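On the newest path these checks select (HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS together with HAVE_BLK_ALLOC_DISK), a block driver supplies its bio handler through block_device_operations and allocates the queue and gendisk in one call. A sketch assuming a kernel where submit_bio returns void (5.16 and later; the 5.9 form returned blk_qc_t), with hypothetical names:

    #include <linux/blkdev.h>

    static void
    my_submit_bio(struct bio *bio)
    {
        /* handle the I/O, then complete the bio */
        bio_endio(bio);
    }

    static const struct block_device_operations my_block_ops = {
        .owner      = THIS_MODULE,
        .submit_bio = my_submit_bio,
    };

    static struct gendisk *
    my_create_disk(void)
    {
    #ifdef HAVE_BLK_ALLOC_DISK
        struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

        if (disk != NULL)
            disk->fops = &my_block_ops;
        return (disk);
    #else
        return (NULL);  /* older kernels go through blk_alloc_queue() */
    #endif
    }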
diff --git a/sys/contrib/openzfs/config/kernel-mkdir.m4 b/sys/contrib/openzfs/config/kernel-mkdir.m4
index 7407a791b846..367f100094d3 100644
--- a/sys/contrib/openzfs/config/kernel-mkdir.m4
+++ b/sys/contrib/openzfs/config/kernel-mkdir.m4
@@ -1,94 +1,94 @@
dnl #
dnl # Supported mkdir() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [
dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
dnl #
ZFS_LINUX_TEST_SRC([mkdir_mnt_idmap], [
#include <linux/fs.h>
- int mkdir(struct mnt_idmap *idmap,
+ static int mkdir(struct mnt_idmap *idmap,
struct inode *inode, struct dentry *dentry,
umode_t umode) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mkdir = mkdir,
};
],[])
dnl #
dnl # 5.12 API change
dnl # The struct user_namespace arg was added as the first argument to
dnl # mkdir()
dnl #
ZFS_LINUX_TEST_SRC([mkdir_user_namespace], [
#include <linux/fs.h>
- int mkdir(struct user_namespace *userns,
+ static int mkdir(struct user_namespace *userns,
struct inode *inode, struct dentry *dentry,
umode_t umode) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mkdir = mkdir,
};
],[])
dnl #
dnl # 3.3 API change
dnl # The VFS .create, .mkdir and .mknod callbacks were updated to take a
dnl # umode_t type rather than an int. The expectation is that any backport
dnl # would also change all three prototypes. However, if it turns out that
dnl # some distribution doesn't backport the whole thing, this could be
dnl # broken apart into three separate checks.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_mkdir], [
#include <linux/fs.h>
- int mkdir(struct inode *inode, struct dentry *dentry,
+ static int mkdir(struct inode *inode, struct dentry *dentry,
umode_t umode) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mkdir = mkdir,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [
dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
dnl #
AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1,
[iops->mkdir() takes struct mnt_idmap*])
],[
dnl #
dnl # 5.12 API change
dnl # The struct user_namespace arg was added as the first argument to
dnl # mkdir() of the iops structure.
dnl #
AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1,
[iops->mkdir() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->mkdir() takes umode_t])
ZFS_LINUX_TEST_RESULT([inode_operations_mkdir], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MKDIR_UMODE_T, 1,
[iops->mkdir() takes umode_t])
],[
ZFS_LINUX_TEST_ERROR([mkdir()])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-mknod.m4 b/sys/contrib/openzfs/config/kernel-mknod.m4
index 1494ec1ae4d4..6ad3453aaf0a 100644
--- a/sys/contrib/openzfs/config/kernel-mknod.m4
+++ b/sys/contrib/openzfs/config/kernel-mknod.m4
@@ -1,56 +1,56 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_MKNOD], [
dnl #
dnl # 6.3 API change
dnl # The first arg is now struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([mknod_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
- int tmp_mknod(struct mnt_idmap *idmap,
+ static int tmp_mknod(struct mnt_idmap *idmap,
struct inode *inode ,struct dentry *dentry,
umode_t u, dev_t d) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mknod = tmp_mknod,
};
],[])
dnl #
dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list.
dnl #
ZFS_LINUX_TEST_SRC([mknod_userns], [
#include <linux/fs.h>
#include <linux/sched.h>
- int tmp_mknod(struct user_namespace *userns,
+ static int tmp_mknod(struct user_namespace *userns,
struct inode *inode ,struct dentry *dentry,
umode_t u, dev_t d) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mknod = tmp_mknod,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_MKNOD], [
AC_MSG_CHECKING([whether iops->mknod() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([mknod_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKNOD_IDMAP, 1,
[iops->mknod() takes struct mnt_idmap*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->mknod() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([mknod_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKNOD_USERNS, 1,
[iops->mknod() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-proc-operations.m4 b/sys/contrib/openzfs/config/kernel-proc-operations.m4
index df216222ecc2..3ae8ce2b6d0d 100644
--- a/sys/contrib/openzfs/config/kernel-proc-operations.m4
+++ b/sys/contrib/openzfs/config/kernel-proc-operations.m4
@@ -1,41 +1,41 @@
dnl #
dnl # 5.6 API Change
dnl # The proc_ops structure was introduced to replace the use
dnl # of the file_operations structure when registering proc handlers.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_PROC_OPERATIONS], [
ZFS_LINUX_TEST_SRC([proc_ops_struct], [
#include <linux/proc_fs.h>
- int test_open(struct inode *ip, struct file *fp) { return 0; }
- ssize_t test_read(struct file *fp, char __user *ptr,
+ static int test_open(struct inode *ip, struct file *fp) { return 0; }
+ static ssize_t test_read(struct file *fp, char __user *ptr,
size_t size, loff_t *offp) { return 0; }
- ssize_t test_write(struct file *fp, const char __user *ptr,
+ static ssize_t test_write(struct file *fp, const char __user *ptr,
size_t size, loff_t *offp) { return 0; }
- loff_t test_lseek(struct file *fp, loff_t off, int flag)
+ static loff_t test_lseek(struct file *fp, loff_t off, int flag)
{ return 0; }
- int test_release(struct inode *ip, struct file *fp)
+ static int test_release(struct inode *ip, struct file *fp)
{ return 0; }
const struct proc_ops test_ops __attribute__ ((unused)) = {
.proc_open = test_open,
.proc_read = test_read,
.proc_write = test_write,
.proc_lseek = test_lseek,
.proc_release = test_release,
};
], [
struct proc_dir_entry *entry __attribute__ ((unused)) =
proc_create_data("test", 0444, NULL, &test_ops, NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_PROC_OPERATIONS], [
AC_MSG_CHECKING([whether proc_ops structure exists])
ZFS_LINUX_TEST_RESULT([proc_ops_struct], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PROC_OPS_STRUCT, 1, [proc_ops structure exists])
], [
AC_MSG_RESULT(no)
])
])
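Consumers of HAVE_PROC_OPS_STRUCT usually keep one set of handlers and let the preprocessor pick the registration structure; proc_create_data() accepts whichever type the running kernel defines. A minimal sketch with hypothetical names:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int
    my_show(struct seq_file *m, void *data)
    {
        seq_puts(m, "hello\n");
        return (0);
    }

    static int
    my_open(struct inode *ip, struct file *fp)
    {
        return (single_open(fp, my_show, NULL));
    }

    #ifdef HAVE_PROC_OPS_STRUCT
    static const struct proc_ops my_proc_ops = {
        .proc_open    = my_open,
        .proc_read    = seq_read,
        .proc_lseek   = seq_lseek,
        .proc_release = single_release,
    };
    #else
    static const struct file_operations my_proc_ops = {
        .owner   = THIS_MODULE,
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };
    #endif

Either way registration reads the same: proc_create_data("my_entry", 0444, NULL, &my_proc_ops, NULL).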
diff --git a/sys/contrib/openzfs/config/kernel-put-link.m4 b/sys/contrib/openzfs/config/kernel-put-link.m4
index 4234861f3347..8ab318cbff8c 100644
--- a/sys/contrib/openzfs/config/kernel-put-link.m4
+++ b/sys/contrib/openzfs/config/kernel-put-link.m4
@@ -1,61 +1,61 @@
dnl #
dnl # Supported symlink APIs
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_PUT_LINK], [
ZFS_LINUX_TEST_SRC([put_link_cookie], [
#include <linux/fs.h>
- void put_link(struct inode *ip, void *cookie)
+ static void put_link(struct inode *ip, void *cookie)
{ return; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.put_link = put_link,
};
],[])
ZFS_LINUX_TEST_SRC([put_link_nameidata], [
#include <linux/fs.h>
- void put_link(struct dentry *de, struct
+ static void put_link(struct dentry *de, struct
nameidata *nd, void *ptr) { return; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.put_link = put_link,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_PUT_LINK], [
dnl #
dnl # 4.5 API change
dnl # get_link() performs cleanup via a delayed done callback, so
dnl # there is no separate put_link() interface. This check reuses
dnl # the inode_operations_get_link test result.
dnl #
ZFS_LINUX_TEST_RESULT([inode_operations_get_link], [
AC_DEFINE(HAVE_PUT_LINK_DELAYED, 1, [iops->put_link() delayed])
],[
dnl #
dnl # 4.2 API change
dnl # This kernel retired the nameidata structure.
dnl #
AC_MSG_CHECKING([whether iops->put_link() passes cookie])
ZFS_LINUX_TEST_RESULT([put_link_cookie], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PUT_LINK_COOKIE, 1,
[iops->put_link() cookie])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 2.6.32 API
dnl #
AC_MSG_CHECKING(
[whether iops->put_link() passes nameidata])
ZFS_LINUX_TEST_RESULT([put_link_nameidata], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PUT_LINK_NAMEIDATA, 1,
[iops->put_link() nameidata])
],[
ZFS_LINUX_TEST_ERROR([put_link])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-rename.m4 b/sys/contrib/openzfs/config/kernel-rename.m4
index 57c3eed78974..ce881502d1b1 100644
--- a/sys/contrib/openzfs/config/kernel-rename.m4
+++ b/sys/contrib/openzfs/config/kernel-rename.m4
@@ -1,133 +1,133 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_RENAME], [
dnl #
dnl # 3.9 (to 4.9) API change,
dnl #
dnl # A new version of iops->rename() was added (rename2) that takes a flag
dnl # argument (to support renameat2). However, this separate function was
dnl # merged back into iops->rename() in Linux 4.9.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_rename2], [
#include <linux/fs.h>
- int rename2_fn(struct inode *sip, struct dentry *sdp,
+ static int rename2_fn(struct inode *sip, struct dentry *sdp,
struct inode *tip, struct dentry *tdp,
unsigned int flags) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.rename2 = rename2_fn,
};
],[])
dnl #
dnl # 4.9 API change,
dnl #
dnl # iops->rename2() merged into iops->rename(), and iops->rename() now
dnl # wants flags.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_rename_flags], [
#include <linux/fs.h>
- int rename_fn(struct inode *sip, struct dentry *sdp,
+ static int rename_fn(struct inode *sip, struct dentry *sdp,
struct inode *tip, struct dentry *tdp,
unsigned int flags) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.rename = rename_fn,
};
],[])
dnl #
dnl # EL7 compatibility
dnl #
dnl # EL7 has backported renameat2 support, but it's done by defining a
dnl # separate iops wrapper structure that takes the .renameat2 function.
dnl #
ZFS_LINUX_TEST_SRC([dir_inode_operations_wrapper_rename2], [
#include <linux/fs.h>
- int rename2_fn(struct inode *sip, struct dentry *sdp,
+ static int rename2_fn(struct inode *sip, struct dentry *sdp,
struct inode *tip, struct dentry *tdp,
unsigned int flags) { return 0; }
static const struct inode_operations_wrapper
iops __attribute__ ((unused)) = {
.rename2 = rename2_fn,
};
],[])
dnl #
dnl # 5.12 API change,
dnl #
dnl # Linux 5.12 introduced passing struct user_namespace* as the first
dnl # argument of the rename() and other inode_operations members.
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_rename_userns], [
#include <linux/fs.h>
- int rename_fn(struct user_namespace *user_ns, struct inode *sip,
+ static int rename_fn(struct user_namespace *user_ns, struct inode *sip,
struct dentry *sdp, struct inode *tip, struct dentry *tdp,
unsigned int flags) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.rename = rename_fn,
};
],[])
dnl #
dnl # 6.3 API change - the first arg is now struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_rename_mnt_idmap], [
#include <linux/fs.h>
- int rename_fn(struct mnt_idmap *idmap, struct inode *sip,
+ static int rename_fn(struct mnt_idmap *idmap, struct inode *sip,
struct dentry *sdp, struct inode *tip, struct dentry *tdp,
unsigned int flags) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.rename = rename_fn,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_RENAME], [
AC_MSG_CHECKING([whether iops->rename() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([inode_operations_rename_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_RENAME_IDMAP, 1,
[iops->rename() takes struct mnt_idmap*])
],[
AC_MSG_CHECKING([whether iops->rename() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([inode_operations_rename_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_RENAME_USERNS, 1,
[iops->rename() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->rename2() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_rename2], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_RENAME2, 1, [iops->rename2() exists])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->rename() wants flags])
ZFS_LINUX_TEST_RESULT([inode_operations_rename_flags], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_RENAME_WANTS_FLAGS, 1,
[iops->rename() wants flags])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether struct inode_operations_wrapper takes .rename2()])
ZFS_LINUX_TEST_RESULT([dir_inode_operations_wrapper_rename2], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_RENAME2_OPERATIONS_WRAPPER, 1,
[struct inode_operations_wrapper takes .rename2()])
],[
AC_MSG_RESULT(no)
])
])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-show-options.m4 b/sys/contrib/openzfs/config/kernel-show-options.m4
index 93bd5fbfbb24..fd62f30086dc 100644
--- a/sys/contrib/openzfs/config/kernel-show-options.m4
+++ b/sys/contrib/openzfs/config/kernel-show-options.m4
@@ -1,25 +1,25 @@
dnl #
dnl # Linux 3.3 API
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SHOW_OPTIONS], [
ZFS_LINUX_TEST_SRC([super_operations_show_options], [
#include <linux/fs.h>
- int show_options(struct seq_file * x, struct dentry * y) {
+ static int show_options(struct seq_file * x, struct dentry * y) {
return 0;
};
static struct super_operations sops __attribute__ ((unused)) = {
.show_options = show_options,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_SHOW_OPTIONS], [
AC_MSG_CHECKING([whether sops->show_options() wants dentry])
ZFS_LINUX_TEST_RESULT([super_operations_show_options], [
AC_MSG_RESULT([yes])
],[
ZFS_LINUX_TEST_ERROR([sops->show_options()])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-shrink.m4 b/sys/contrib/openzfs/config/kernel-shrink.m4
index 0c702153e8c4..6580b08d5ff2 100644
--- a/sys/contrib/openzfs/config/kernel-shrink.m4
+++ b/sys/contrib/openzfs/config/kernel-shrink.m4
@@ -1,188 +1,260 @@
dnl #
dnl # 3.1 API change
dnl # The super_block structure now stores a per-filesystem shrinker.
dnl # This interface is preferable because it can be used to specifically
dnl # target only the zfs filesystem for pruning.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SUPER_BLOCK_S_SHRINK], [
ZFS_LINUX_TEST_SRC([super_block_s_shrink], [
#include <linux/fs.h>
- int shrink(struct shrinker *s, struct shrink_control *sc)
- { return 0; }
-
static const struct super_block
sb __attribute__ ((unused)) = {
.s_shrink.seeks = DEFAULT_SEEKS,
.s_shrink.batch = 0,
};
],[])
])
+dnl #
+dnl # 6.7 API change
+dnl # s_shrink is now a pointer.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_SUPER_BLOCK_S_SHRINK_PTR], [
+ ZFS_LINUX_TEST_SRC([super_block_s_shrink_ptr], [
+ #include <linux/fs.h>
+ static unsigned long shrinker_cb(struct shrinker *shrink,
+ struct shrink_control *sc) { return 0; }
+ static struct shrinker shrinker = {
+ .count_objects = shrinker_cb,
+ .scan_objects = shrinker_cb,
+ .seeks = DEFAULT_SEEKS,
+ };
+ static const struct super_block
+ sb __attribute__ ((unused)) = {
+ .s_shrink = &shrinker,
+ };
+ ],[])
+])
+
AC_DEFUN([ZFS_AC_KERNEL_SUPER_BLOCK_S_SHRINK], [
AC_MSG_CHECKING([whether super_block has s_shrink])
ZFS_LINUX_TEST_RESULT([super_block_s_shrink], [
AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SUPER_BLOCK_S_SHRINK, 1,
+ [have super_block s_shrink])
],[
- ZFS_LINUX_TEST_ERROR([sb->s_shrink()])
+ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING([whether super_block has s_shrink pointer])
+ ZFS_LINUX_TEST_RESULT([super_block_s_shrink_ptr], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SUPER_BLOCK_S_SHRINK_PTR, 1,
+ [have super_block s_shrink pointer])
+ ],[
+ AC_MSG_RESULT(no)
+ ZFS_LINUX_TEST_ERROR([sb->s_shrink()])
+ ])
])
])
dnl #
dnl # 3.12 API change
dnl # The nid member was added to struct shrink_control to support
dnl # NUMA-aware shrinkers.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SHRINK_CONTROL_HAS_NID], [
ZFS_LINUX_TEST_SRC([shrink_control_nid], [
#include <linux/fs.h>
],[
struct shrink_control sc __attribute__ ((unused));
unsigned long scnidsize __attribute__ ((unused)) =
sizeof(sc.nid);
])
])
AC_DEFUN([ZFS_AC_KERNEL_SHRINK_CONTROL_HAS_NID], [
AC_MSG_CHECKING([whether shrink_control has nid])
ZFS_LINUX_TEST_RESULT([shrink_control_nid], [
AC_MSG_RESULT(yes)
AC_DEFINE(SHRINK_CONTROL_HAS_NID, 1,
[struct shrink_control has nid])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_REGISTER_SHRINKER_VARARG], [
ZFS_LINUX_TEST_SRC([register_shrinker_vararg], [
#include <linux/mm.h>
- unsigned long shrinker_cb(struct shrinker *shrink,
+ static unsigned long shrinker_cb(struct shrinker *shrink,
struct shrink_control *sc) { return 0; }
],[
struct shrinker cache_shrinker = {
.count_objects = shrinker_cb,
.scan_objects = shrinker_cb,
.seeks = DEFAULT_SEEKS,
};
register_shrinker(&cache_shrinker, "vararg-reg-shrink-test");
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_SHRINKER_CALLBACK], [
ZFS_LINUX_TEST_SRC([shrinker_cb_shrink_control], [
#include <linux/mm.h>
- int shrinker_cb(struct shrinker *shrink,
+ static int shrinker_cb(struct shrinker *shrink,
struct shrink_control *sc) { return 0; }
],[
struct shrinker cache_shrinker = {
.shrink = shrinker_cb,
.seeks = DEFAULT_SEEKS,
};
register_shrinker(&cache_shrinker);
])
ZFS_LINUX_TEST_SRC([shrinker_cb_shrink_control_split], [
#include <linux/mm.h>
- unsigned long shrinker_cb(struct shrinker *shrink,
+ static unsigned long shrinker_cb(struct shrinker *shrink,
struct shrink_control *sc) { return 0; }
],[
struct shrinker cache_shrinker = {
.count_objects = shrinker_cb,
.scan_objects = shrinker_cb,
.seeks = DEFAULT_SEEKS,
};
register_shrinker(&cache_shrinker);
])
])
+dnl #
+dnl # 6.7 API change
+dnl # register_shrinker has been replaced by shrinker_register.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_SHRINKER_REGISTER], [
+ ZFS_LINUX_TEST_SRC([shrinker_register], [
+ #include <linux/shrinker.h>
+ static unsigned long shrinker_cb(struct shrinker *shrink,
+ struct shrink_control *sc) { return 0; }
+ ],[
+ struct shrinker cache_shrinker = {
+ .count_objects = shrinker_cb,
+ .scan_objects = shrinker_cb,
+ .seeks = DEFAULT_SEEKS,
+ };
+ shrinker_register(&cache_shrinker);
+ ])
+])
+
AC_DEFUN([ZFS_AC_KERNEL_SHRINKER_CALLBACK],[
dnl #
dnl # 6.0 API change
dnl # register_shrinker() becomes a var-arg function that takes
dnl # a printf-style format string as args > 0
dnl #
AC_MSG_CHECKING([whether new var-arg register_shrinker() exists])
ZFS_LINUX_TEST_RESULT([register_shrinker_vararg], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_REGISTER_SHRINKER_VARARG, 1,
[register_shrinker is vararg])
dnl # We assume that the split shrinker callback exists if the
dnl # vararg register_shrinker() exists, because the latter is
dnl # a much more recent addition, and the macro test for the
dnl # var-arg version only works if the callback is split
AC_DEFINE(HAVE_SPLIT_SHRINKER_CALLBACK, 1,
[cs->count_objects exists])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 3.0 - 3.11 API change
dnl # cs->shrink(struct shrinker *, struct shrink_control *sc)
dnl #
AC_MSG_CHECKING([whether new 2-argument shrinker exists])
ZFS_LINUX_TEST_RESULT([shrinker_cb_shrink_control], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SINGLE_SHRINKER_CALLBACK, 1,
[new shrinker callback wants 2 args])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 3.12 API change,
dnl # cs->shrink() is logically split in to
dnl # cs->count_objects() and cs->scan_objects()
dnl #
- AC_MSG_CHECKING([if cs->count_objects callback exists])
+ AC_MSG_CHECKING(
+ [whether cs->count_objects callback exists])
ZFS_LINUX_TEST_RESULT(
- [shrinker_cb_shrink_control_split],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_SPLIT_SHRINKER_CALLBACK, 1,
- [cs->count_objects exists])
+ [shrinker_cb_shrink_control_split],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SPLIT_SHRINKER_CALLBACK, 1,
+ [cs->count_objects exists])
],[
+ AC_MSG_RESULT(no)
+
+ AC_MSG_CHECKING(
+ [whether shrinker_register exists])
+ ZFS_LINUX_TEST_RESULT([shrinker_register], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SHRINKER_REGISTER, 1,
+ [shrinker_register exists])
+
+ dnl # We assume that the split shrinker
+ dnl # callback exists if
+ dnl # shrinker_register() exists,
+ dnl # because the latter is a much more
+ dnl # recent addition, and the macro
+ dnl # test for shrinker_register() only
+ dnl # works if the callback is split
+ AC_DEFINE(HAVE_SPLIT_SHRINKER_CALLBACK,
+ 1, [cs->count_objects exists])
+ ],[
+ AC_MSG_RESULT(no)
ZFS_LINUX_TEST_ERROR([shrinker])
+ ])
])
])
])
])
dnl #
dnl # 2.6.39 API change,
dnl # Shrinker adjust to use common shrink_control structure.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SHRINK_CONTROL_STRUCT], [
ZFS_LINUX_TEST_SRC([shrink_control_struct], [
#include <linux/mm.h>
],[
struct shrink_control sc __attribute__ ((unused));
sc.nr_to_scan = 0;
sc.gfp_mask = GFP_KERNEL;
])
])
AC_DEFUN([ZFS_AC_KERNEL_SHRINK_CONTROL_STRUCT], [
AC_MSG_CHECKING([whether struct shrink_control exists])
ZFS_LINUX_TEST_RESULT([shrink_control_struct], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SHRINK_CONTROL_STRUCT, 1,
[struct shrink_control exists])
],[
ZFS_LINUX_TEST_ERROR([shrink_control])
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_SHRINKER], [
ZFS_AC_KERNEL_SRC_SUPER_BLOCK_S_SHRINK
+ ZFS_AC_KERNEL_SRC_SUPER_BLOCK_S_SHRINK_PTR
ZFS_AC_KERNEL_SRC_SHRINK_CONTROL_HAS_NID
ZFS_AC_KERNEL_SRC_SHRINKER_CALLBACK
ZFS_AC_KERNEL_SRC_SHRINK_CONTROL_STRUCT
ZFS_AC_KERNEL_SRC_REGISTER_SHRINKER_VARARG
+ ZFS_AC_KERNEL_SRC_SHRINKER_REGISTER
])
AC_DEFUN([ZFS_AC_KERNEL_SHRINKER], [
ZFS_AC_KERNEL_SUPER_BLOCK_S_SHRINK
ZFS_AC_KERNEL_SHRINK_CONTROL_HAS_NID
ZFS_AC_KERNEL_SHRINKER_CALLBACK
ZFS_AC_KERNEL_SHRINK_CONTROL_STRUCT
])
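Registration code can then branch on the three outcomes: shrinker_register() on 6.7+, the vararg register_shrinker() on 6.0+, and the classic form before that. A sketch with hypothetical names (on real 6.7 kernels the shrinker would normally come from shrinker_alloc(), which the conftest does not need for detection):

    #include <linux/shrinker.h>

    static unsigned long
    my_count(struct shrinker *s, struct shrink_control *sc)
    {
        return (0);             /* nothing reclaimable in this sketch */
    }

    static unsigned long
    my_scan(struct shrinker *s, struct shrink_control *sc)
    {
        return (SHRINK_STOP);
    }

    static struct shrinker my_shrinker = {
        .count_objects = my_count,
        .scan_objects  = my_scan,
        .seeks         = DEFAULT_SEEKS,
    };

    static int
    my_shrinker_init(void)
    {
    #if defined(HAVE_SHRINKER_REGISTER)
        shrinker_register(&my_shrinker);
        return (0);
    #elif defined(HAVE_REGISTER_SHRINKER_VARARG)
        return (register_shrinker(&my_shrinker, "my-shrinker"));
    #else
        return (register_shrinker(&my_shrinker));
    #endif
    }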
diff --git a/sys/contrib/openzfs/config/kernel-strlcpy.m4 b/sys/contrib/openzfs/config/kernel-strlcpy.m4
new file mode 100644
index 000000000000..c31cf52d78b0
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-strlcpy.m4
@@ -0,0 +1,47 @@
+dnl #
+dnl # 6.8.x replaced strlcpy with strscpy. Check for both so we can provide
+dnl # appropriate fallbacks.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_STRLCPY], [
+ ZFS_LINUX_TEST_SRC([kernel_has_strlcpy], [
+ #include <linux/string.h>
+ ], [
+ const char *src = "goodbye";
+ char dst[32];
+ size_t len;
+ len = strlcpy(dst, src, sizeof (dst));
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_STRSCPY], [
+ ZFS_LINUX_TEST_SRC([kernel_has_strscpy], [
+ #include <linux/string.h>
+ ], [
+ const char *src = "goodbye";
+ char dst[32];
+ ssize_t len;
+ len = strscpy(dst, src, sizeof (dst));
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_STRLCPY], [
+ AC_MSG_CHECKING([whether strlcpy() exists])
+ ZFS_LINUX_TEST_RESULT([kernel_has_strlcpy], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_KERNEL_STRLCPY, 1,
+ [strlcpy() exists])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_STRSCPY], [
+ AC_MSG_CHECKING([whether strscpy() exists])
+ ZFS_LINUX_TEST_RESULT([kernel_has_strscpy], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_KERNEL_STRSCPY, 1,
+ [strscpy() exists])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
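Where only strscpy() survives, a strlcpy()-compatible wrapper is straightforward; the one semantic difference is the return value, since strlcpy() always reports strlen(src) while strscpy() reports bytes copied or -E2BIG on truncation. A sketch, with the wrapper name purely illustrative:

    #include <linux/string.h>

    #ifndef HAVE_KERNEL_STRLCPY
    static inline size_t
    my_strlcpy(char *dst, const char *src, size_t size)
    {
        ssize_t ret = strscpy(dst, src, size);

        if (ret >= 0)
            return (ret);       /* fits: copied == strlen(src) */
        return (strlen(src));   /* truncated: report source length */
    }
    #else
    #define my_strlcpy strlcpy
    #endif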
diff --git a/sys/contrib/openzfs/config/kernel-symlink.m4 b/sys/contrib/openzfs/config/kernel-symlink.m4
index a0333ed66a7c..804fceab28f0 100644
--- a/sys/contrib/openzfs/config/kernel-symlink.m4
+++ b/sys/contrib/openzfs/config/kernel-symlink.m4
@@ -1,53 +1,53 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_SYMLINK], [
dnl #
dnl # 6.3 API change that changed the first arg
dnl # to struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([symlink_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
- int tmp_symlink(struct mnt_idmap *idmap,
+ static int tmp_symlink(struct mnt_idmap *idmap,
struct inode *inode ,struct dentry *dentry,
const char *path) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.symlink = tmp_symlink,
};
],[])
dnl #
dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list.
dnl #
ZFS_LINUX_TEST_SRC([symlink_userns], [
#include <linux/fs.h>
#include <linux/sched.h>
- int tmp_symlink(struct user_namespace *userns,
+ static int tmp_symlink(struct user_namespace *userns,
struct inode *inode ,struct dentry *dentry,
const char *path) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.symlink = tmp_symlink,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_SYMLINK], [
AC_MSG_CHECKING([whether iops->symlink() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([symlink_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_SYMLINK_IDMAP, 1,
[iops->symlink() takes struct mnt_idmap*])
],[
AC_MSG_CHECKING([whether iops->symlink() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([symlink_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_SYMLINK_USERNS, 1,
[iops->symlink() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-timer.m4 b/sys/contrib/openzfs/config/kernel-timer.m4
index 403cff3f4189..c710e804be0b 100644
--- a/sys/contrib/openzfs/config/kernel-timer.m4
+++ b/sys/contrib/openzfs/config/kernel-timer.m4
@@ -1,75 +1,75 @@
dnl # 4.14-rc3 API change
dnl # https://lwn.net/Articles/735887/
dnl #
dnl # Check if timer_list.func get passed a timer_list or an unsigned long
dnl # (older kernels). Also sanity check the from_timer() and timer_setup()
dnl # macros are available as well, since they will be used in the same newer
dnl # kernels that support the new timer_list.func signature.
dnl #
dnl # Also check for the existence of flags in struct timer_list, they were
dnl # added in 4.1-rc8 via 0eeda71bc30d.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_TIMER_SETUP], [
ZFS_LINUX_TEST_SRC([timer_setup], [
#include <linux/timer.h>
struct my_task_timer {
struct timer_list timer;
int data;
};
- void task_expire(struct timer_list *tl)
+ static void task_expire(struct timer_list *tl)
{
struct my_task_timer *task_timer =
from_timer(task_timer, tl, timer);
task_timer->data = 42;
}
],[
struct my_task_timer task_timer;
timer_setup(&task_timer.timer, task_expire, 0);
])
ZFS_LINUX_TEST_SRC([timer_list_function], [
#include <linux/timer.h>
- void task_expire(struct timer_list *tl) {}
+ static void task_expire(struct timer_list *tl) {}
],[
struct timer_list tl;
tl.function = task_expire;
])
ZFS_LINUX_TEST_SRC([timer_list_flags], [
#include <linux/timer.h>
],[
struct timer_list tl;
tl.flags = 2;
])
])
AC_DEFUN([ZFS_AC_KERNEL_TIMER_SETUP], [
AC_MSG_CHECKING([whether timer_setup() is available])
ZFS_LINUX_TEST_RESULT([timer_setup], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KERNEL_TIMER_SETUP, 1,
[timer_setup() is available])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether timer function expects timer_list])
ZFS_LINUX_TEST_RESULT([timer_list_function], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST, 1,
[timer_list.function gets a timer_list])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether struct timer_list has flags])
ZFS_LINUX_TEST_RESULT([timer_list_flags], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KERNEL_TIMER_LIST_FLAGS, 1,
[struct timer_list has a flags member])
],[
AC_MSG_RESULT(no)
])
])
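When HAVE_KERNEL_TIMER_SETUP is defined, the callback receives the timer_list itself and recovers its containing structure with from_timer(); this is the usage pattern the conftest above compiles. A short sketch with hypothetical names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_state {
        struct timer_list timer;
        int fired;
    };

    static void
    my_expire(struct timer_list *tl)
    {
        /* recover the enclosing structure from the embedded timer */
        struct my_state *s = from_timer(s, tl, timer);

        s->fired = 1;
    }

    static void
    my_start(struct my_state *s)
    {
        timer_setup(&s->timer, my_expire, 0);
        mod_timer(&s->timer, jiffies + HZ); /* fire in about a second */
    }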
diff --git a/sys/contrib/openzfs/config/kernel-tmpfile.m4 b/sys/contrib/openzfs/config/kernel-tmpfile.m4
index cc18b8f65a88..7439514186e4 100644
--- a/sys/contrib/openzfs/config/kernel-tmpfile.m4
+++ b/sys/contrib/openzfs/config/kernel-tmpfile.m4
@@ -1,86 +1,86 @@
dnl #
dnl # 3.11 API change
dnl # Add support for i_op->tmpfile
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_TMPFILE], [
dnl #
dnl # 6.3 API change
dnl # The first arg is now struct mnt_idmap *
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_tmpfile_mnt_idmap], [
#include <linux/fs.h>
- int tmpfile(struct mnt_idmap *idmap,
+ static int tmpfile(struct mnt_idmap *idmap,
struct inode *inode, struct file *file,
umode_t mode) { return 0; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.tmpfile = tmpfile,
};
],[])
dnl #
dnl # 6.1 API change
dnl # Use struct file instead of struct dentry
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_tmpfile], [
#include <linux/fs.h>
- int tmpfile(struct user_namespace *userns,
+ static int tmpfile(struct user_namespace *userns,
struct inode *inode, struct file *file,
umode_t mode) { return 0; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.tmpfile = tmpfile,
};
],[])
dnl #
dnl # 5.11 API change
dnl # add support for userns parameter to tmpfile
dnl #
ZFS_LINUX_TEST_SRC([inode_operations_tmpfile_dentry_userns], [
#include <linux/fs.h>
- int tmpfile(struct user_namespace *userns,
+ static int tmpfile(struct user_namespace *userns,
struct inode *inode, struct dentry *dentry,
umode_t mode) { return 0; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.tmpfile = tmpfile,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_tmpfile_dentry], [
#include <linux/fs.h>
- int tmpfile(struct inode *inode, struct dentry *dentry,
+ static int tmpfile(struct inode *inode, struct dentry *dentry,
umode_t mode) { return 0; }
static struct inode_operations
iops __attribute__ ((unused)) = {
.tmpfile = tmpfile,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_TMPFILE], [
AC_MSG_CHECKING([whether i_op->tmpfile() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
AC_DEFINE(HAVE_TMPFILE_IDMAP, 1, [i_op->tmpfile() has mnt_idmap])
], [
ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
AC_DEFINE(HAVE_TMPFILE_USERNS, 1, [i_op->tmpfile() has userns])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
AC_DEFINE(HAVE_TMPFILE_USERNS, 1, [i_op->tmpfile() has userns])
AC_DEFINE(HAVE_TMPFILE_DENTRY, 1, [i_op->tmpfile() uses old dentry signature])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
AC_DEFINE(HAVE_TMPFILE_DENTRY, 1, [i_op->tmpfile() uses old dentry signature])
],[
ZFS_LINUX_REQUIRE_API([i_op->tmpfile()], [3.11])
])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-vfs-direct_IO.m4 b/sys/contrib/openzfs/config/kernel-vfs-direct_IO.m4
index 82583d52fcbc..7b7b91f979f9 100644
--- a/sys/contrib/openzfs/config/kernel-vfs-direct_IO.m4
+++ b/sys/contrib/openzfs/config/kernel-vfs-direct_IO.m4
@@ -1,109 +1,109 @@
dnl #
dnl # Check for direct IO interfaces.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO], [
ZFS_LINUX_TEST_SRC([direct_io_iter], [
#include <linux/fs.h>
- ssize_t test_direct_IO(struct kiocb *kiocb,
+ static ssize_t test_direct_IO(struct kiocb *kiocb,
struct iov_iter *iter) { return 0; }
static const struct address_space_operations
aops __attribute__ ((unused)) = {
.direct_IO = test_direct_IO,
};
],[])
ZFS_LINUX_TEST_SRC([direct_io_iter_offset], [
#include <linux/fs.h>
- ssize_t test_direct_IO(struct kiocb *kiocb,
+ static ssize_t test_direct_IO(struct kiocb *kiocb,
struct iov_iter *iter, loff_t offset) { return 0; }
static const struct address_space_operations
aops __attribute__ ((unused)) = {
.direct_IO = test_direct_IO,
};
],[])
ZFS_LINUX_TEST_SRC([direct_io_iter_rw_offset], [
#include <linux/fs.h>
- ssize_t test_direct_IO(int rw, struct kiocb *kiocb,
+ static ssize_t test_direct_IO(int rw, struct kiocb *kiocb,
struct iov_iter *iter, loff_t offset) { return 0; }
static const struct address_space_operations
aops __attribute__ ((unused)) = {
.direct_IO = test_direct_IO,
};
],[])
ZFS_LINUX_TEST_SRC([direct_io_iovec], [
#include <linux/fs.h>
- ssize_t test_direct_IO(int rw, struct kiocb *kiocb,
+ static ssize_t test_direct_IO(int rw, struct kiocb *kiocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs) { return 0; }
static const struct address_space_operations
aops __attribute__ ((unused)) = {
.direct_IO = test_direct_IO,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_DIRECT_IO], [
dnl #
dnl # Linux 4.6.x API change
dnl #
AC_MSG_CHECKING([whether aops->direct_IO() uses iov_iter])
ZFS_LINUX_TEST_RESULT([direct_io_iter], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_VFS_DIRECT_IO_ITER, 1,
[aops->direct_IO() uses iov_iter without rw])
],[
AC_MSG_RESULT([no])
dnl #
dnl # Linux 4.1.x API change
dnl #
AC_MSG_CHECKING(
[whether aops->direct_IO() uses offset])
ZFS_LINUX_TEST_RESULT([direct_io_iter_offset], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_VFS_DIRECT_IO_ITER_OFFSET, 1,
[aops->direct_IO() uses iov_iter with offset])
],[
AC_MSG_RESULT([no])
dnl #
dnl # Linux 3.16.x API change
dnl #
AC_MSG_CHECKING(
[whether aops->direct_IO() uses rw and offset])
ZFS_LINUX_TEST_RESULT([direct_io_iter_rw_offset], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET, 1,
[aops->direct_IO() uses iov_iter with ]
[rw and offset])
],[
AC_MSG_RESULT([no])
dnl #
dnl # Ancient Linux API (predates git)
dnl #
AC_MSG_CHECKING(
[whether aops->direct_IO() uses iovec])
ZFS_LINUX_TEST_RESULT([direct_io_iovec], [
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_VFS_DIRECT_IO_IOVEC, 1,
[aops->direct_IO() uses iovec])
],[
ZFS_LINUX_TEST_ERROR([direct IO])
AC_MSG_RESULT([no])
])
])
])
])
])
diff --git a/sys/contrib/openzfs/config/kernel-vfs-iterate.m4 b/sys/contrib/openzfs/config/kernel-vfs-iterate.m4
index 172118eac87b..2e396daa1c0f 100644
--- a/sys/contrib/openzfs/config/kernel-vfs-iterate.m4
+++ b/sys/contrib/openzfs/config/kernel-vfs-iterate.m4
@@ -1,83 +1,83 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_ITERATE], [
ZFS_LINUX_TEST_SRC([file_operations_iterate_shared], [
#include <linux/fs.h>
- int iterate(struct file *filp, struct dir_context * context)
+ static int iterate(struct file *filp, struct dir_context * context)
{ return 0; }
static const struct file_operations fops
__attribute__ ((unused)) = {
.iterate_shared = iterate,
};
],[])
ZFS_LINUX_TEST_SRC([file_operations_iterate], [
#include <linux/fs.h>
- int iterate(struct file *filp,
+ static int iterate(struct file *filp,
struct dir_context *context) { return 0; }
static const struct file_operations fops
__attribute__ ((unused)) = {
.iterate = iterate,
};
#if defined(FMODE_KABI_ITERATE)
#error "RHEL 7.5, FMODE_KABI_ITERATE interface"
#endif
],[])
ZFS_LINUX_TEST_SRC([file_operations_readdir], [
#include <linux/fs.h>
- int readdir(struct file *filp, void *entry,
+ static int readdir(struct file *filp, void *entry,
filldir_t func) { return 0; }
static const struct file_operations fops
__attribute__ ((unused)) = {
.readdir = readdir,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_ITERATE], [
dnl #
dnl # 4.7 API change
dnl #
AC_MSG_CHECKING([whether fops->iterate_shared() is available])
ZFS_LINUX_TEST_RESULT([file_operations_iterate_shared], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VFS_ITERATE_SHARED, 1,
[fops->iterate_shared() is available])
],[
AC_MSG_RESULT(no)
dnl #
dnl # 3.11 API change
dnl #
dnl # RHEL 7.5 compatibility; the fops.iterate() method was
dnl # added to the file_operations structure but in order to
dnl # maintain KABI compatibility all callers must set
dnl # FMODE_KABI_ITERATE which is checked in iterate_dir().
dnl # When detected, ignore this interface and fall back
dnl # to using fops.readdir() to retain KABI compatibility.
dnl #
AC_MSG_CHECKING([whether fops->iterate() is available])
ZFS_LINUX_TEST_RESULT([file_operations_iterate], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VFS_ITERATE, 1,
[fops->iterate() is available])
],[
AC_MSG_RESULT(no)
dnl #
dnl # readdir interface introduced
dnl #
AC_MSG_CHECKING([whether fops->readdir() is available])
ZFS_LINUX_TEST_RESULT([file_operations_readdir], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VFS_READDIR, 1,
[fops->readdir() is available])
],[
ZFS_LINUX_TEST_ERROR([vfs_iterate])
])
])
])
])
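A filesystem consuming these results wires exactly one directory-iteration entry point into its file_operations. A sketch with a hypothetical handler:

    #include <linux/fs.h>

    static int
    my_iterate(struct file *filp, struct dir_context *ctx)
    {
        if (!dir_emit_dots(filp, ctx))
            return (0);
        /* emit real entries here with dir_emit() */
        return (0);
    }

    static const struct file_operations my_dir_fops = {
    #if defined(HAVE_VFS_ITERATE_SHARED)
        .iterate_shared = my_iterate,
    #elif defined(HAVE_VFS_ITERATE)
        .iterate        = my_iterate,
    #endif
        /* HAVE_VFS_READDIR kernels take a filldir_t-based .readdir instead */
    };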
diff --git a/sys/contrib/openzfs/config/kernel-vfs-rw-iterate.m4 b/sys/contrib/openzfs/config/kernel-vfs-rw-iterate.m4
index 000353ec15b0..cb20ed03099a 100644
--- a/sys/contrib/openzfs/config/kernel-vfs-rw-iterate.m4
+++ b/sys/contrib/openzfs/config/kernel-vfs-rw-iterate.m4
@@ -1,80 +1,80 @@
dnl #
dnl # Linux 3.16 API
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE], [
ZFS_LINUX_TEST_SRC([file_operations_rw], [
#include <linux/fs.h>
- ssize_t test_read(struct kiocb *kiocb, struct iov_iter *to)
+ static ssize_t test_read(struct kiocb *kiocb, struct iov_iter *to)
{ return 0; }
- ssize_t test_write(struct kiocb *kiocb, struct iov_iter *from)
+ static ssize_t test_write(struct kiocb *kiocb, struct iov_iter *from)
{ return 0; }
static const struct file_operations
fops __attribute__ ((unused)) = {
.read_iter = test_read,
.write_iter = test_write,
};
],[])
ZFS_LINUX_TEST_SRC([new_sync_rw], [
#include <linux/fs.h>
],[
ssize_t ret __attribute__ ((unused));
struct file *filp = NULL;
char __user *rbuf = NULL;
const char __user *wbuf = NULL;
size_t len = 0;
loff_t ppos;
ret = new_sync_read(filp, rbuf, len, &ppos);
ret = new_sync_write(filp, wbuf, len, &ppos);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_RW_ITERATE], [
AC_MSG_CHECKING([whether fops->read/write_iter() are available])
ZFS_LINUX_TEST_RESULT([file_operations_rw], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VFS_RW_ITERATE, 1,
[fops->read/write_iter() are available])
dnl #
dnl # Linux 4.1 API
dnl #
AC_MSG_CHECKING([whether new_sync_read/write() are available])
ZFS_LINUX_TEST_RESULT([new_sync_rw], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_NEW_SYNC_READ, 1,
[new_sync_read()/new_sync_write() are available])
],[
AC_MSG_RESULT(no)
])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 4.1.x API
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS], [
ZFS_LINUX_TEST_SRC([generic_write_checks], [
#include <linux/fs.h>
],[
struct kiocb *iocb = NULL;
struct iov_iter *iov = NULL;
generic_write_checks(iocb, iov);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS], [
AC_MSG_CHECKING([whether generic_write_checks() takes kiocb])
ZFS_LINUX_TEST_RESULT([generic_write_checks], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GENERIC_WRITE_CHECKS_KIOCB, 1,
[generic_write_checks() takes kiocb])
],[
AC_MSG_RESULT(no)
])
])
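The visible difference behind HAVE_GENERIC_WRITE_CHECKS_KIOCB is the calling convention: the 4.1+ form returns the (possibly trimmed) byte count, while the older form adjusts the count through a pointer. A write-path sketch, with a hypothetical wrapper and the historical pre-4.1 prototype noted in the comment:

    #include <linux/fs.h>
    #include <linux/uio.h>

    static ssize_t
    my_write_checks(struct kiocb *kiocb, struct iov_iter *from)
    {
    #ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
        /* 4.1+: returns the trimmed count, or a negative error */
        return (generic_write_checks(kiocb, from));
    #else
        /* pre-4.1: int generic_write_checks(file, pos, count, isblk) */
        size_t count = iov_iter_count(from);
        int error = generic_write_checks(kiocb->ki_filp,
            &kiocb->ki_pos, &count, 0);

        if (error)
            return (error);
        return ((ssize_t)count);
    #endif
    }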
diff --git a/sys/contrib/openzfs/config/kernel-writepage_t.m4 b/sys/contrib/openzfs/config/kernel-writepage_t.m4
index 3a0cffd98570..a82cf370c9d4 100644
--- a/sys/contrib/openzfs/config/kernel-writepage_t.m4
+++ b/sys/contrib/openzfs/config/kernel-writepage_t.m4
@@ -1,26 +1,26 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_WRITEPAGE_T], [
dnl #
dnl # 6.3 API change
dnl # The writepage_t function type now has its first argument as
dnl # struct folio* instead of struct page*
dnl #
ZFS_LINUX_TEST_SRC([writepage_t_folio], [
#include <linux/writeback.h>
- int putpage(struct folio *folio,
+ static int putpage(struct folio *folio,
struct writeback_control *wbc, void *data)
{ return 0; }
writepage_t func = putpage;
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_WRITEPAGE_T], [
AC_MSG_CHECKING([whether int (*writepage_t)() takes struct folio*])
ZFS_LINUX_TEST_RESULT([writepage_t_folio], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_WRITEPAGE_T_FOLIO, 1,
[int (*writepage_t)() takes struct folio*])
],[
AC_MSG_RESULT(no)
])
])
diff --git a/sys/contrib/openzfs/config/kernel-xattr-handler.m4 b/sys/contrib/openzfs/config/kernel-xattr-handler.m4
index 6b8a08dbcc80..32f58c70a500 100644
--- a/sys/contrib/openzfs/config/kernel-xattr-handler.m4
+++ b/sys/contrib/openzfs/config/kernel-xattr-handler.m4
@@ -1,477 +1,477 @@
dnl #
dnl # 2.6.35 API change,
dnl # The 'struct xattr_handler' was constified in the generic
dnl # super_block structure.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_CONST_XATTR_HANDLER], [
ZFS_LINUX_TEST_SRC([const_xattr_handler], [
#include <linux/fs.h>
#include <linux/xattr.h>
const struct xattr_handler xattr_test_handler = {
.prefix = "test",
.get = NULL,
.set = NULL,
};
const struct xattr_handler *xattr_handlers[] = {
&xattr_test_handler,
};
const struct super_block sb __attribute__ ((unused)) = {
.s_xattr = xattr_handlers,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_CONST_XATTR_HANDLER], [
AC_MSG_CHECKING([whether super_block uses const struct xattr_handler])
ZFS_LINUX_TEST_RESULT([const_xattr_handler], [
AC_MSG_RESULT([yes])
],[
ZFS_LINUX_TEST_ERROR([const xattr_handler])
])
])
dnl #
dnl # 4.5 API change,
dnl # struct xattr_handler added new member "name".
dnl # An xattr_handler which matches the whole name rather than a prefix should use
dnl # "name" instead of "prefix", e.g. "system.posix_acl_access"
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_NAME], [
ZFS_LINUX_TEST_SRC([xattr_handler_name], [
#include <linux/xattr.h>
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_NAME], [
AC_MSG_CHECKING([whether xattr_handler has name])
ZFS_LINUX_TEST_RESULT([xattr_handler_name], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_HANDLER_NAME, 1,
[xattr_handler has name])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Supported xattr handler get() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_GET], [
ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry_inode], [
#include <linux/xattr.h>
- int get(const struct xattr_handler *handler,
+ static int get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_get_xattr_handler], [
#include <linux/xattr.h>
- int get(const struct xattr_handler *handler,
+ static int get(const struct xattr_handler *handler,
struct dentry *dentry, const char *name,
void *buffer, size_t size) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry], [
#include <linux/xattr.h>
- int get(struct dentry *dentry, const char *name,
+ static int get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int handler_flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_get_dentry_inode_flags], [
#include <linux/xattr.h>
- int get(const struct xattr_handler *handler,
+ static int get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer,
size_t size, int flags) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.get = get,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_GET], [
dnl #
dnl # 4.7 API change,
dnl # The xattr_handler->get() callback was changed to take both
dnl # dentry and inode.
dnl #
AC_MSG_CHECKING([whether xattr_handler->get() wants dentry and inode])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry_inode], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_DENTRY_INODE, 1,
[xattr_handler->get() wants both dentry and inode])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->get() callback was changed to take an
dnl # xattr_handler, and the handler_flags argument was removed; the
dnl # flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->get() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_HANDLER, 1,
[xattr_handler->get() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->get() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->get() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_DENTRY, 1,
[xattr_handler->get() wants dentry])
],[
dnl #
dnl # Android API change,
dnl # The xattr_handler->get() callback was
dnl # changed to take dentry, inode and flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->get() wants dentry and inode and flags])
ZFS_LINUX_TEST_RESULT([xattr_handler_get_dentry_inode_flags], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_GET_DENTRY_INODE_FLAGS, 1,
[xattr_handler->get() wants dentry and inode and flags])
],[
ZFS_LINUX_TEST_ERROR([xattr get()])
])
])
])
])
])
dnl #
dnl # Supported xattr handler set() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_SET], [
ZFS_LINUX_TEST_SRC([xattr_handler_set_mnt_idmap], [
#include <linux/xattr.h>
- int set(const struct xattr_handler *handler,
+ static int set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_userns], [
#include <linux/xattr.h>
- int set(const struct xattr_handler *handler,
+ static int set(const struct xattr_handler *handler,
struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_dentry_inode], [
#include <linux/xattr.h>
- int set(const struct xattr_handler *handler,
+ static int set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_xattr_handler], [
#include <linux/xattr.h>
- int set(const struct xattr_handler *handler,
+ static int set(const struct xattr_handler *handler,
struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags)
{ return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_set_dentry], [
#include <linux/xattr.h>
- int set(struct dentry *dentry, const char *name,
+ static int set(struct dentry *dentry, const char *name,
const void *buffer, size_t size, int flags,
int handler_flags) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.set = set,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_SET], [
dnl #
dnl # 5.12 API change,
dnl # The xattr_handler->set() callback was changed to 8 arguments, and
dnl # struct user_namespace* was inserted as arg #2
dnl #
dnl # 6.3 API change,
dnl # The xattr_handler->set() callback 2nd arg is now struct mnt_idmap *
dnl #
AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and mnt_idmap])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_mnt_idmap], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_IDMAP, 1,
[xattr_handler->set() takes mnt_idmap])
], [
AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and user_namespace])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_USERNS, 1,
[xattr_handler->set() takes user_namespace])
],[
dnl #
dnl # 4.7 API change,
dnl # The xattr_handler->set() callback was changed to take both
dnl # dentry and inode.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether xattr_handler->set() wants dentry and inode])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry_inode], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_DENTRY_INODE, 1,
[xattr_handler->set() wants both dentry and inode])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->set() callback was changed to take an
dnl # xattr_handler, and the handler_flags argument was removed; the
dnl # flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->set() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_HANDLER, 1,
[xattr_handler->set() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->set() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->set() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_SET_DENTRY, 1,
[xattr_handler->set() wants dentry])
],[
ZFS_LINUX_TEST_ERROR([xattr set()])
])
])
])
])
])
])
dnl #
dnl # Supported xattr handler list() interfaces checked newest to oldest.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_LIST], [
ZFS_LINUX_TEST_SRC([xattr_handler_list_simple], [
#include <linux/xattr.h>
- bool list(struct dentry *dentry) { return 0; }
+ static bool list(struct dentry *dentry) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_list_xattr_handler], [
#include <linux/xattr.h>
- size_t list(const struct xattr_handler *handler,
+ static size_t list(const struct xattr_handler *handler,
struct dentry *dentry, char *list, size_t list_size,
const char *name, size_t name_len) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
ZFS_LINUX_TEST_SRC([xattr_handler_list_dentry], [
#include <linux/xattr.h>
- size_t list(struct dentry *dentry,
+ static size_t list(struct dentry *dentry,
char *list, size_t list_size,
const char *name, size_t name_len,
int handler_flags) { return 0; }
static const struct xattr_handler
xops __attribute__ ((unused)) = {
.list = list,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_LIST], [
dnl # 4.5 API change,
dnl # The xattr_handler->list() callback was changed to take only a
dnl # dentry, and it only needs to return whether it's accessible.
AC_MSG_CHECKING([whether xattr_handler->list() wants simple])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_simple], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_SIMPLE, 1,
[xattr_handler->list() wants simple])
],[
dnl #
dnl # 4.4 API change,
dnl # The xattr_handler->list() callback was changed to take an
dnl # xattr_handler, and the handler_flags argument was removed; the
dnl # flags should now be accessed via handler->flags.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->list() wants xattr_handler])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_xattr_handler], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_HANDLER, 1,
[xattr_handler->list() wants xattr_handler])
],[
dnl #
dnl # 2.6.33 API change,
dnl # The xattr_handler->list() callback was changed
dnl # to take a dentry instead of an inode, and a
dnl # handler_flags argument was added.
dnl #
AC_MSG_RESULT(no)
AC_MSG_CHECKING(
[whether xattr_handler->list() wants dentry])
ZFS_LINUX_TEST_RESULT([xattr_handler_list_dentry], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_XATTR_LIST_DENTRY, 1,
[xattr_handler->list() wants dentry])
],[
ZFS_LINUX_TEST_ERROR([xattr list()])
])
])
])
])
dnl #
dnl # 3.7 API change,
dnl # The posix_acl_{from,to}_xattr functions gained a new
dnl # parameter: user_ns
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_FROM_XATTR_USERNS], [
ZFS_LINUX_TEST_SRC([posix_acl_from_xattr_userns], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
],[
posix_acl_from_xattr(&init_user_ns, NULL, 0);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_FROM_XATTR_USERNS], [
AC_MSG_CHECKING([whether posix_acl_from_xattr() needs user_ns])
ZFS_LINUX_TEST_RESULT([posix_acl_from_xattr_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_FROM_XATTR_USERNS, 1,
[posix_acl_from_xattr() needs user_ns])
],[
ZFS_LINUX_TEST_ERROR([posix_acl_from_xattr()])
])
])
dnl #
dnl # 4.9 API change,
dnl # iops->{set,get,remove}xattr and generic_{set,get,remove}xattr are
dnl # removed. xattr operations will directly go through sb->s_xattr.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_SETXATTR], [
ZFS_LINUX_TEST_SRC([have_generic_setxattr], [
#include <linux/fs.h>
#include <linux/xattr.h>
static const struct inode_operations
iops __attribute__ ((unused)) = {
.setxattr = generic_setxattr
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_GENERIC_SETXATTR], [
AC_MSG_CHECKING([whether generic_setxattr() exists])
ZFS_LINUX_TEST_RESULT([have_generic_setxattr], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GENERIC_SETXATTR, 1,
[generic_setxattr() exists])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR], [
ZFS_AC_KERNEL_SRC_CONST_XATTR_HANDLER
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_NAME
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_GET
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_SET
ZFS_AC_KERNEL_SRC_XATTR_HANDLER_LIST
ZFS_AC_KERNEL_SRC_POSIX_ACL_FROM_XATTR_USERNS
ZFS_AC_KERNEL_SRC_GENERIC_SETXATTR
])
AC_DEFUN([ZFS_AC_KERNEL_XATTR], [
ZFS_AC_KERNEL_CONST_XATTR_HANDLER
ZFS_AC_KERNEL_XATTR_HANDLER_NAME
ZFS_AC_KERNEL_XATTR_HANDLER_GET
ZFS_AC_KERNEL_XATTR_HANDLER_SET
ZFS_AC_KERNEL_XATTR_HANDLER_LIST
ZFS_AC_KERNEL_POSIX_ACL_FROM_XATTR_USERNS
ZFS_AC_KERNEL_GENERIC_SETXATTR
])
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index 056517a841f2..e3f8645774c5 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,1030 +1,1042 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_CONFIG_DEFINED
ZFS_AC_MODULE_SYMVERS
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once the compilation can be done in parallel significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_FADVISE
ZFS_AC_KERNEL_SRC_GENERIC_FADVISE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_GENHD_FLAGS
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_SETATTR
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_PERMISSION
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_READPAGES
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_VFS_COPY_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_COPY_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_REMAP_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_CLONE_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_SRC_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SYSFS
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
+ ZFS_AC_KERNEL_SRC_STRLCPY
+ ZFS_AC_KERNEL_SRC_STRSCPY
ZFS_AC_KERNEL_SRC_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_SRC_ADD_DISK
ZFS_AC_KERNEL_SRC_KTHREAD
ZFS_AC_KERNEL_SRC_ZERO_PAGE
ZFS_AC_KERNEL_SRC___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_SRC_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_SRC_IDMAP_MNT_API
+ ZFS_AC_KERNEL_SRC_IDMAP_NO_USERNS
ZFS_AC_KERNEL_SRC_IATTR_VFSID
ZFS_AC_KERNEL_SRC_FILEMAP
ZFS_AC_KERNEL_SRC_WRITEPAGE_T
ZFS_AC_KERNEL_SRC_RECLAIMED
ZFS_AC_KERNEL_SRC_REGISTER_SYSCTL_TABLE
ZFS_AC_KERNEL_SRC_COPY_SPLICE_READ
ZFS_AC_KERNEL_SRC_SYNC_BDEV
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE
ZFS_AC_KERNEL_SRC_FLUSH_DCACHE_PAGE
;;
+ riscv*)
+ ZFS_AC_KERNEL_SRC_FLUSH_DCACHE_PAGE
+ ;;
esac
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_FADVISE
ZFS_AC_KERNEL_GENERIC_FADVISE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_GENHD_FLAGS
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_SETATTR
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_PERMISSION
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_VFS_READ_FOLIO
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_READPAGES
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_VFS_COPY_FILE_RANGE
ZFS_AC_KERNEL_VFS_GENERIC_COPY_FILE_RANGE
ZFS_AC_KERNEL_VFS_REMAP_FILE_RANGE
ZFS_AC_KERNEL_VFS_CLONE_FILE_RANGE
ZFS_AC_KERNEL_VFS_DEDUPE_FILE_RANGE
ZFS_AC_KERNEL_VFS_FILE_OPERATIONS_EXTEND
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SYSFS
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
+ ZFS_AC_KERNEL_STRLCPY
+ ZFS_AC_KERNEL_STRSCPY
ZFS_AC_KERNEL_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_ADD_DISK
ZFS_AC_KERNEL_KTHREAD
ZFS_AC_KERNEL_ZERO_PAGE
ZFS_AC_KERNEL___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_IDMAP_MNT_API
+ ZFS_AC_KERNEL_IDMAP_NO_USERNS
ZFS_AC_KERNEL_IATTR_VFSID
ZFS_AC_KERNEL_FILEMAP
ZFS_AC_KERNEL_WRITEPAGE_T
ZFS_AC_KERNEL_RECLAIMED
ZFS_AC_KERNEL_REGISTER_SYSCTL_TABLE
ZFS_AC_KERNEL_COPY_SPLICE_READ
ZFS_AC_KERNEL_SYNC_BDEV
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_CPU_HAS_FEATURE
ZFS_AC_KERNEL_FLUSH_DCACHE_PAGE
;;
+ riscv*)
+ ZFS_AC_KERNEL_FLUSH_DCACHE_PAGE
+ ;;
esac
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
dnl # Most modern Linux distributions have separate locations for bare
dnl # source (source) and prebuilt (build) files. Additionally, there are
dnl # `source` and `build` symlinks in `/lib/modules/$(KERNEL_VERSION)`
dnl # pointing to them. The directory search order is now:
dnl #
dnl # - `configure` command line values if both `--with-linux` and
dnl # `--with-linux-obj` were defined
dnl #
dnl # - If only `--with-linux` was defined, `--with-linux-obj` is assumed
dnl # to have the same value as `--with-linux`
dnl #
dnl # - If neither `--with-linux` nor `--with-linux-obj` were defined
dnl # autodetection is used:
dnl #
dnl # - `/lib/modules/$(uname -r)/{source,build}` respectively, if they exist.
dnl #
dnl # - If only `/lib/modules/$(uname -r)/build` exists, it is assumed
dnl # to be both source and build directory.
dnl #
dnl # - The first directory in `/lib/modules` with the highest version
dnl # number according to `sort -V` which contains both `source` and
dnl # `build` symlinks/directories. If module directory contains only
dnl # `build` component, it is assumed to be both source and build
dnl # directory.
dnl #
dnl # - Last resort: the first directory matching `/usr/src/kernels/*`
dnl # and `/usr/src/linux-*` with the highest version number according
dnl # to `sort -V` is assumed to be both source and build directory.
dnl #
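As an illustrative sketch (paths are hypothetical; adjust them for your system), the first two cases in the search order above correspond to invocations such as:

	./configure --with-linux=/usr/src/linux \
	    --with-linux-obj=/lib/modules/$(uname -r)/build
	./configure --with-linux=/lib/modules/$(uname -r)/build

In the second form, the build directory is assumed to match the source directory, as described above.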
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source and build directories])
AS_IF([test -n "$kernelsrc" && test -z "$kernelbuild"], [
kernelbuild="$kernelsrc"
], [test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source" && \
test -e "/lib/modules/$(uname -r)/build"], [
src="/lib/modules/$(uname -r)/source"
build="/lib/modules/$(uname -r)/build"
], [test -e "/lib/modules/$(uname -r)/build"], [
build="/lib/modules/$(uname -r)/build"
src="$build"
], [
src=
for d in $(ls -1d /lib/modules/* 2>/dev/null | sort -Vr); do
if test -e "$d/source" && test -e "$d/build"; then
src="$d/source"
build="$d/build"
break
fi
if test -e "$d/build"; then
src="$d/build"
build="$d/build"
break
fi
done
# the least reliable method
if test -z "$src"; then
src=$(ls -1d /usr/src/kernels/* /usr/src/linux-* \
2>/dev/null | grep -v obj | sort -Vr | head -1)
build="$src"
fi
])
AS_IF([test -n "$src" && test -e "$src"], [
kernelsrc=$(readlink -e "$src")
], [
kernelsrc="[Not found]"
])
AS_IF([test -n "$build" && test -e "$build"], [
kernelbuild=$(readlink -e "$build")
], [
kernelbuild="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([done])
AC_MSG_CHECKING([kernel source directory])
AC_MSG_RESULT([$kernelsrc])
AC_MSG_CHECKING([kernel build directory])
AC_MSG_RESULT([$kernelbuild])
AS_IF([test ! -d "$kernelsrc" || test ! -d "$kernelbuild"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source and build with the '--with-linux=PATH' and
*** '--with-linux-obj=PATH' options respectively.])
])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && grep -qF UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && grep -qF UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && grep -qF UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
])
dnl #
dnl # Detect the QAT module to be built against, QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Data written to this ZFS pool is then compressed by the QAT accelerator
dnl # automatically, and decompressed by QAT when read from the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists]
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
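For illustration, a call such as the following (the prologue and body are hypothetical) expands to a complete conftest module source, with the prologue placed above main() and the body inside it, followed by the MODULE_* boilerplate shown above:

	ZFS_LINUX_TEST_PROGRAM([[
		#include <linux/fs.h>
	]], [[
		struct inode *ip __attribute__ ((unused)) = NULL;
	]], [["Dual BSD/GPL"]])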
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed -i '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_ARG_VAR([KERNEL_CC], [C compiler for
building kernel modules])
AC_ARG_VAR([KERNEL_LD], [Linker for
building kernel modules])
AC_ARG_VAR([KERNEL_LLVM], [Binary option to
build kernel modules with LLVM/CLANG toolchain])
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC}
${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM}
CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES
-C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures, which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
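For example, a hypothetical invocation capping the parallelism of this phase:

	TEST_JOBS=4 ./configure --with-linux=/lib/modules/$(uname -r)/build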
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin), the
dnl # linking step is faked by artificially creating the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. because all of the test cases are compiled in parallel they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
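To illustrate the pairing (a minimal sketch; the test name and define are hypothetical), the source macro registers the test and the result macro, run after ZFS_LINUX_TEST_COMPILE_ALL, checks whether a valid .ko was produced:

	ZFS_LINUX_TEST_SRC([inode_lock], [
		#include <linux/fs.h>
	],[
		struct inode *ip = NULL;
		inode_lock(ip);
	])

	ZFS_LINUX_TEST_RESULT([inode_lock], [
		AC_MSG_RESULT(yes)
		AC_DEFINE(HAVE_INODE_LOCK, 1, [inode_lock() exists])
	],[
		AC_MSG_RESULT(no)
	])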
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
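As a sketch (the test name, symbol, file, and define are hypothetical), the extra arguments name the exported symbol to verify and the source files to consult when it is missing from the symbols file:

	ZFS_LINUX_TEST_RESULT_SYMBOL([sync_bdev], [sync_blockdev], [fs/sync.c], [
		AC_DEFINE(HAVE_SYNC_BLOCKDEV, 1, [sync_blockdev() is available])
	],[
		AC_MSG_RESULT(no)
	])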
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
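For instance (the symbol and file here are illustrative), checking whether the kernel exports iterate_dir(), either via the symbols file or via an EXPORT_SYMBOL in the named source file:

	ZFS_CHECK_SYMBOL_EXPORT([iterate_dir], [fs/readdir.c],
	    [AC_MSG_RESULT(yes)],
	    [AC_MSG_RESULT(no)])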
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE, except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4], [$5])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4], [$5])
])
])
dnl #
dnl # AS_VERSION_COMPARE_LE
dnl # like AS_VERSION_COMPARE, but runs $3 if (and only if) $1 <= $2
dnl # AS_VERSION_COMPARE_LE (version-1, version-2, [action-if-less-or-equal], [action-if-greater])
dnl #
AC_DEFUN([AS_VERSION_COMPARE_LE], [
AS_VERSION_COMPARE([$1], [$2], [$3], [$3], [$4])
])
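For example, running one of two actions depending on whether the detected kernel version is at least 5.12 (the version and messages are illustrative):

	AS_VERSION_COMPARE_LE([5.12], [$kernsrcver],
	    [AC_MSG_RESULT([kernel is 5.12 or newer])],
	    [AC_MSG_RESULT([kernel is older than 5.12])])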
dnl #
dnl # ZFS_LINUX_REQUIRE_API
dnl # like ZFS_LINUX_TEST_ERROR, except it only fails if the kernel is
dnl # at least the specified version.
dnl #
AC_DEFUN([ZFS_LINUX_REQUIRE_API], [
AS_VERSION_COMPARE_LE([$2], [$kernsrcver], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected. This
*** interface is expected for kernels version "$2" and above.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications. Newer kernels may have incompatible
*** APIs.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
], [
AC_MSG_RESULT(no)
])
])
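A typical use (the interface name and version are hypothetical) is in the failure branch of a result check, so that configure only aborts when the kernel is new enough that the interface should have been present; on older kernels it simply reports "no" via the macro's else branch:

	ZFS_LINUX_TEST_RESULT([vfs_iterate_shared], [
		AC_MSG_RESULT(yes)
	],[
		ZFS_LINUX_REQUIRE_API([iops->iterate_shared()], [4.7])
	])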
diff --git a/sys/contrib/openzfs/config/rpm.am b/sys/contrib/openzfs/config/rpm.am
index 13bd54a625b0..85c56c0b2e3a 100644
--- a/sys/contrib/openzfs/config/rpm.am
+++ b/sys/contrib/openzfs/config/rpm.am
@@ -1,105 +1,115 @@
###############################################################################
# Copyright (C) 2007-2013 Lawrence Livermore National Security, LLC.
# Copyright (C) 2007 The Regents of the University of California.
# Written by Brian Behlendorf <behlendorf1@llnl.gov>.
###############################################################################
# Build targets for RPM packages.
###############################################################################
PHONY += srpm srpms srpm-kmod srpm-dkms srpm-utils
PHONY += rpm rpms rpm-kmod rpm-dkms rpm-utils rpm-utils-initramfs
PHONY += srpm-common rpm-common rpm-local
srpm-kmod srpm-dkms srpm-utils: dist
srpm-kmod:
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}-kmod" \
def='${SRPM_DEFINE_COMMON} ${SRPM_DEFINE_KMOD}' srpm-common
srpm-dkms:
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}-dkms" \
def='${SRPM_DEFINE_COMMON} ${SRPM_DEFINE_DKMS}' srpm-common
srpm-utils:
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}" \
def='${SRPM_DEFINE_COMMON} ${SRPM_DEFINE_UTIL}' srpm-common
srpm: srpm-kmod srpm-dkms srpm-utils
srpms: srpm-kmod srpm-dkms srpm-utils
rpm-kmod: srpm-kmod
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}-kmod" \
def='${RPM_DEFINE_COMMON} ${RPM_DEFINE_KMOD}' rpm-common
rpm-dkms: srpm-dkms
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}-dkms" \
def='${RPM_DEFINE_COMMON} ${RPM_DEFINE_DKMS}' rpm-common
# The rpm-utils and rpm-utils-initramfs targets are identical except for the
# zfs-initramfs package: rpm-utils never includes it, rpm-utils-initramfs
# includes it if detected at configure time. The zfs-initramfs package does
# not work on any known RPM-based distribution and the resulting RPM is only
# used to create a Debian package. The rpm-utils-initramfs target is not
# intended to be specified by the user directly; it is provided as a
# dependency of the deb-utils target.
rpm-utils: srpm-utils
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}" \
def='${RPM_DEFINE_COMMON} ${RPM_DEFINE_UTIL}' rpm-common
rpm-utils-initramfs: srpm-utils
$(MAKE) $(AM_MAKEFLAGS) pkg="${PACKAGE}" \
def='${RPM_DEFINE_COMMON} ${RPM_DEFINE_UTIL} ${RPM_DEFINE_INITRAMFS}' rpm-common
rpm: rpm-kmod rpm-dkms rpm-utils
rpms: rpm-kmod rpm-dkms rpm-utils
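Typical invocations (illustrative) build either all packages or only the userland packages:

	make rpm
	make rpm-utils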
rpm-local:
@(if test "${HAVE_RPMBUILD}" = "no"; then \
echo -e "\n" \
"*** Required util ${RPMBUILD} missing. Please install the\n" \
"*** package for your distribution which provides ${RPMBUILD},\n" \
"*** re-run configure, and try again.\n"; \
exit 1; \
fi; \
mkdir -p $(rpmbuild)/TMP && \
mkdir -p $(rpmbuild)/BUILD && \
mkdir -p $(rpmbuild)/RPMS && \
mkdir -p $(rpmbuild)/SRPMS && \
mkdir -p $(rpmbuild)/SPECS && \
cp ${RPM_SPEC_DIR}/$(rpmspec) $(rpmbuild)/SPECS && \
mkdir -p $(rpmbuild)/SOURCES && \
cp $(top_srcdir)/scripts/kmodtool $(rpmbuild)/SOURCES && \
cp $(distdir).tar.gz $(rpmbuild)/SOURCES)
srpm-common:
@(dist=`$(RPM) --eval %{?dist}`; \
rpmpkg=$(pkg)-$(VERSION)-$(RELEASE)$$dist*src.rpm; \
rpmspec=$(pkg).spec; \
rpmbuild=`mktemp -t -d $(PACKAGE)-build-$$USER-XXXXXXXX`; \
$(MAKE) $(AM_MAKEFLAGS) \
rpmbuild="$$rpmbuild" \
rpmspec="$$rpmspec" \
rpm-local || exit 1; \
LANG=C $(RPMBUILD) \
--define "_tmppath $$rpmbuild/TMP" \
+ --define "_builddir $$rpmbuild/BUILD" \
+ --define "_rpmdir $$rpmbuild/RPMS" \
+ --define "_srcrpmdir $$rpmbuild/SRPMS" \
+ --define "_specdir $$rpmbuild/SPECS" \
+ --define "_sourcedir $$rpmbuild/SOURCES" \
--define "_topdir $$rpmbuild" \
$(def) -bs $$rpmbuild/SPECS/$$rpmspec || exit 1; \
cp $$rpmbuild/SRPMS/$$rpmpkg . || exit 1; \
rm -R $$rpmbuild)
rpm-common:
@(dist=`$(RPM) --eval %{?dist}`; \
rpmpkg=$(pkg)-$(VERSION)-$(RELEASE)$$dist*src.rpm; \
rpmspec=$(pkg).spec; \
rpmbuild=`mktemp -t -d $(PACKAGE)-build-$$USER-XXXXXXXX`; \
$(MAKE) $(AM_MAKEFLAGS) \
rpmbuild="$$rpmbuild" \
rpmspec="$$rpmspec" \
rpm-local || exit 1; \
LANG=C ${RPMBUILD} \
--define "_tmppath $$rpmbuild/TMP" \
+ --define "_builddir $$rpmbuild/BUILD" \
+ --define "_rpmdir $$rpmbuild/RPMS" \
+ --define "_srcrpmdir $$rpmbuild/SRPMS" \
+ --define "_specdir $$rpmbuild/SPECS" \
+ --define "_sourcedir $$rpmbuild/SOURCES" \
--define "_topdir $$rpmbuild" \
$(def) --rebuild $$rpmpkg || exit 1; \
cp $$rpmbuild/RPMS/*/* . || exit 1; \
rm -R $$rpmbuild)
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index 4adfa38e87be..770c5e1f201c 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -1,1049 +1,1052 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright Joyent, Inc.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H extern __attribute__((visibility("default")))
#include <assert.h>
#include <libshare.h>
#include <libnvpair.h>
#include <sys/mnttab.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/avl.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* libzfs errors
*/
typedef enum zfs_error {
EZFS_SUCCESS = 0, /* no error -- success */
EZFS_NOMEM = 2000, /* out of memory */
EZFS_BADPROP, /* invalid property value */
EZFS_PROPREADONLY, /* cannot set readonly property */
EZFS_PROPTYPE, /* property does not apply to dataset type */
EZFS_PROPNONINHERIT, /* property is not inheritable */
EZFS_PROPSPACE, /* bad quota or reservation */
EZFS_BADTYPE, /* dataset is not of appropriate type */
EZFS_BUSY, /* pool or dataset is busy */
EZFS_EXISTS, /* pool or dataset already exists */
EZFS_NOENT, /* no such pool or dataset */
EZFS_BADSTREAM, /* bad backup stream */
EZFS_DSREADONLY, /* dataset is readonly */
EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
EZFS_INVALIDNAME, /* invalid dataset name */
EZFS_BADRESTORE, /* unable to restore to destination */
EZFS_BADBACKUP, /* backup failed */
EZFS_BADTARGET, /* bad attach/detach/replace target */
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
EZFS_RESILVERING, /* resilvering (healing reconstruction) */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
EZFS_BADPATH, /* must be an absolute path */
EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
EZFS_UNSHARENFSFAILED, /* failed to unshare over nfs */
EZFS_SHARENFSFAILED, /* failed to share over nfs */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
EZFS_IO, /* I/O error */
EZFS_INTR, /* signal received */
EZFS_ISSPARE, /* device is a hot spare */
EZFS_INVALCONFIG, /* invalid vdev configuration */
EZFS_RECURSIVE, /* recursive dependency */
EZFS_NOHISTORY, /* no history object */
EZFS_POOLPROPS, /* couldn't retrieve pool props */
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
EZFS_OPENFAILED, /* open of device failed */
EZFS_NOCAP, /* couldn't get capacity */
EZFS_LABELFAILED, /* write of label failed */
EZFS_BADWHO, /* invalid permission who */
EZFS_BADPERM, /* invalid permission */
EZFS_BADPERMSET, /* invalid permission set name */
EZFS_NODELEGATION, /* delegated administration is disabled */
EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
EZFS_SHARESMBFAILED, /* failed to share over smb */
EZFS_BADCACHE, /* bad cache file */
EZFS_ISL2CACHE, /* device is for the level 2 ARC */
EZFS_VDEVNOTSUP, /* unsupported vdev type */
EZFS_NOTSUP, /* ops not supported on this dataset */
EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */
EZFS_REFTAG_RELE, /* snapshot release: tag not found */
EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */
EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */
EZFS_PIPEFAILED, /* pipe create failed */
EZFS_THREADCREATEFAILED, /* thread create failed */
EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */
EZFS_SCRUBBING, /* currently scrubbing */
EZFS_ERRORSCRUBBING, /* currently error scrubbing */
EZFS_ERRORSCRUB_PAUSED, /* error scrub currently paused */
EZFS_NO_SCRUB, /* no active scrub */
EZFS_DIFF, /* general failure of zfs diff */
EZFS_DIFFDATA, /* bad zfs diff data */
EZFS_POOLREADONLY, /* pool is in read-only mode */
EZFS_SCRUB_PAUSED, /* scrub currently paused */
EZFS_SCRUB_PAUSED_TO_CANCEL, /* paused scrub must be resumed to cancel */
EZFS_ACTIVE_POOL, /* pool is imported on a different system */
EZFS_CRYPTOFAILED, /* failed to setup encryption */
EZFS_NO_PENDING, /* cannot cancel, no operation is pending */
EZFS_CHECKPOINT_EXISTS, /* checkpoint exists */
EZFS_DISCARDING_CHECKPOINT, /* currently discarding a checkpoint */
EZFS_NO_CHECKPOINT, /* pool has no checkpoint */
EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */
EZFS_VDEV_TOO_BIG, /* a device is too big to be used */
EZFS_IOC_NOTSUPPORTED, /* operation not supported by zfs module */
EZFS_TOOMANY, /* argument list too long */
EZFS_INITIALIZING, /* currently initializing */
EZFS_NO_INITIALIZE, /* no active initialize */
EZFS_WRONG_PARENT, /* invalid parent dataset (e.g. ZVOL) */
EZFS_TRIMMING, /* currently trimming */
EZFS_NO_TRIM, /* no active trim */
EZFS_TRIM_NOTSUP, /* device does not support trim */
EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_EXPORT_IN_PROGRESS, /* currently exporting the pool */
EZFS_REBUILDING, /* resilvering (sequential reconstruction) */
EZFS_VDEV_NOTSUP, /* ops not supported for this type of vdev */
EZFS_NOT_USER_NAMESPACE, /* a file is not a user namespace */
EZFS_CKSUM, /* insufficient replicas */
EZFS_RESUME_EXISTS, /* Resume on existing dataset without force */
EZFS_SHAREFAILED, /* filesystem share failed */
EZFS_UNKNOWN
} zfs_error_t;
/*
* The following data structures are all part
* of the zfs_allow_t data structure which is
* used for printing 'allow' permissions.
* It is a linked list of zfs_allow_t's which
* then contain AVL trees for user/group/sets/...,
* and each of the entries in those trees has
* AVL trees for the permissions it holds and
* whether they are local, descendent, or local+descendent
* permissions. The AVL trees are used primarily for
* sorting purposes, but also so that we can quickly find
* a given user and/or permission.
*/
typedef struct zfs_perm_node {
avl_node_t z_node;
char z_pname[MAXPATHLEN];
} zfs_perm_node_t;
typedef struct zfs_allow_node {
avl_node_t z_node;
char z_key[MAXPATHLEN]; /* name, such as joe */
avl_tree_t z_localdescend; /* local+descendent perms */
avl_tree_t z_local; /* local permissions */
avl_tree_t z_descend; /* descendent permissions */
} zfs_allow_node_t;
typedef struct zfs_allow {
struct zfs_allow *z_next;
char z_setpoint[MAXPATHLEN];
avl_tree_t z_sets;
avl_tree_t z_crperms;
avl_tree_t z_user;
avl_tree_t z_group;
avl_tree_t z_everyone;
} zfs_allow_t;
/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
_LIBZFS_H int zpool_wait(zpool_handle_t *, zpool_wait_activity_t);
_LIBZFS_H int zpool_wait_status(zpool_handle_t *, zpool_wait_activity_t,
boolean_t *, boolean_t *);
/*
* Library initialization
*/
_LIBZFS_H libzfs_handle_t *libzfs_init(void);
_LIBZFS_H void libzfs_fini(libzfs_handle_t *);
_LIBZFS_H libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
_LIBZFS_H libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
_LIBZFS_H void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
_LIBZFS_H void zfs_save_arguments(int argc, char **, char *, int);
_LIBZFS_H int zpool_log_history(libzfs_handle_t *, const char *);
_LIBZFS_H int libzfs_errno(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_init(int);
_LIBZFS_H const char *libzfs_error_action(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_description(libzfs_handle_t *);
_LIBZFS_H int zfs_standard_error(libzfs_handle_t *, int, const char *);
_LIBZFS_H void libzfs_mnttab_init(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_fini(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
_LIBZFS_H int libzfs_mnttab_find(libzfs_handle_t *, const char *,
struct mnttab *);
_LIBZFS_H void libzfs_mnttab_add(libzfs_handle_t *, const char *,
const char *, const char *);
_LIBZFS_H void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
/*
* Basic handle functions
*/
_LIBZFS_H zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
_LIBZFS_H zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
_LIBZFS_H void zpool_close(zpool_handle_t *);
_LIBZFS_H const char *zpool_get_name(zpool_handle_t *);
_LIBZFS_H int zpool_get_state(zpool_handle_t *);
_LIBZFS_H const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
_LIBZFS_H const char *zpool_pool_state_to_name(pool_state_t);
_LIBZFS_H void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
*/
typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
_LIBZFS_H int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
_LIBZFS_H boolean_t zpool_skip_pool(const char *);
/*
* Functions to create and destroy pools
*/
_LIBZFS_H int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
nvlist_t *, nvlist_t *);
_LIBZFS_H int zpool_destroy(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_add(zpool_handle_t *, nvlist_t *);
typedef struct splitflags {
/* do not split, but return the config that would be split off */
unsigned int dryrun : 1;
/* after splitting, import the pool */
unsigned int import : 1;
int name_flags;
} splitflags_t;
typedef struct trimflags {
/* requested vdevs are for the entire pool */
boolean_t fullpool;
/* request a secure trim, requires support from device */
boolean_t secure;
/* after starting trim, block until trim completes */
boolean_t wait;
/* trim at the requested rate in bytes/second */
uint64_t rate;
} trimflags_t;
/*
* Functions to manipulate pool and vdev state
*/
_LIBZFS_H int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
_LIBZFS_H int zpool_initialize(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_trim(zpool_handle_t *, pool_trim_func_t, nvlist_t *,
trimflags_t *);
_LIBZFS_H int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zpool_reguid(zpool_handle_t *);
_LIBZFS_H int zpool_reopen_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_sync_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
_LIBZFS_H int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
_LIBZFS_H int zpool_vdev_attach(zpool_handle_t *, const char *,
const char *, nvlist_t *, int, boolean_t);
_LIBZFS_H int zpool_vdev_detach(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove_cancel(zpool_handle_t *);
_LIBZFS_H int zpool_vdev_indirect_size(zpool_handle_t *, const char *,
uint64_t *);
_LIBZFS_H int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **,
nvlist_t *, splitflags_t);
_LIBZFS_H int zpool_vdev_remove_wanted(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
+_LIBZFS_H int zpool_vdev_set_removed_state(zpool_handle_t *, uint64_t,
+ vdev_aux_t);
+
_LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
_LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
const char *);
_LIBZFS_H int zpool_prepare_disk(zpool_handle_t *zhp, nvlist_t *vdev_nv,
const char *prepare_str, char **lines[], int *lines_cnt);
_LIBZFS_H int zpool_prepare_and_label_disk(libzfs_handle_t *hdl,
zpool_handle_t *, const char *, nvlist_t *vdev_nv, const char *prepare_str,
char **lines[], int *lines_cnt);
_LIBZFS_H char ** zpool_vdev_script_alloc_env(const char *pool_name,
const char *vdev_path, const char *vdev_upath,
const char *vdev_enc_sysfs_path, const char *opt_key, const char *opt_val);
_LIBZFS_H void zpool_vdev_script_free_env(char **env);
_LIBZFS_H uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp,
const char *path);
_LIBZFS_H const char *zpool_get_state_str(zpool_handle_t *);
/*
* Functions to manage pool properties
*/
_LIBZFS_H int zpool_set_prop(zpool_handle_t *, const char *, const char *);
_LIBZFS_H int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
_LIBZFS_H int zpool_get_userprop(zpool_handle_t *, const char *, char *,
size_t proplen, zprop_source_t *);
_LIBZFS_H uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
_LIBZFS_H int zpool_props_refresh(zpool_handle_t *);
_LIBZFS_H const char *zpool_prop_to_name(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_values(zpool_prop_t);
/*
* Functions to manage vdev properties
*/
_LIBZFS_H int zpool_get_vdev_prop_value(nvlist_t *, vdev_prop_t, char *, char *,
size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_vdev_prop(zpool_handle_t *, const char *, vdev_prop_t,
char *, char *, size_t, zprop_source_t *, boolean_t);
_LIBZFS_H int zpool_get_all_vdev_props(zpool_handle_t *, const char *,
nvlist_t **);
_LIBZFS_H int zpool_set_vdev_prop(zpool_handle_t *, const char *, const char *,
const char *);
_LIBZFS_H const char *vdev_prop_to_name(vdev_prop_t);
_LIBZFS_H const char *vdev_prop_values(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_user(const char *name);
_LIBZFS_H const char *vdev_prop_column_name(vdev_prop_t);
_LIBZFS_H boolean_t vdev_prop_align_right(vdev_prop_t);
/*
* Pool health statistics.
*/
typedef enum {
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
* This must be kept in sync with the zfs_msgid_table in
* lib/libzfs/libzfs_status.c.
*/
ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */
ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */
ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */
ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
ZPOOL_STATUS_ERRATA, /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */
ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */
ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
ZPOOL_STATUS_REBUILDING, /* device being rebuilt */
ZPOOL_STATUS_REBUILD_SCRUB, /* recommend scrubbing the pool */
ZPOOL_STATUS_NON_NATIVE_ASHIFT, /* (e.g. 512e dev with ashift of 9) */
ZPOOL_STATUS_COMPATIBILITY_ERR, /* bad 'compatibility' property */
ZPOOL_STATUS_INCOMPATIBLE_FEAT, /* feature set outside compatibility */
/*
* Finally, the following indicates a healthy pool.
*/
ZPOOL_STATUS_OK
} zpool_status_t;
_LIBZFS_H zpool_status_t zpool_get_status(zpool_handle_t *, const char **,
zpool_errata_t *);
_LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, const char **,
zpool_errata_t *);
/*
* Statistics and configuration functions.
*/
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
_LIBZFS_H int zpool_export(zpool_handle_t *, boolean_t, const char *);
_LIBZFS_H int zpool_export_force(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
_LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
/*
* Miscellaneous pool functions
*/
struct zfs_cmd;
_LIBZFS_H const char *const zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
_LIBZFS_H char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
int name_flags);
_LIBZFS_H int zpool_upgrade(zpool_handle_t *, uint64_t);
_LIBZFS_H int zpool_get_history(zpool_handle_t *, nvlist_t **, uint64_t *,
boolean_t *);
_LIBZFS_H int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned,
int);
_LIBZFS_H int zpool_events_clear(libzfs_handle_t *, int *);
_LIBZFS_H int zpool_events_seek(libzfs_handle_t *, uint64_t, int);
_LIBZFS_H void zpool_obj_to_path_ds(zpool_handle_t *, uint64_t, uint64_t,
char *, size_t);
_LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t);
_LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
_LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
_LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
_LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
_LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
*/
_LIBZFS_H zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
_LIBZFS_H zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
_LIBZFS_H void zfs_close(zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_type(const zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_underlying_type(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_name(const zfs_handle_t *);
_LIBZFS_H zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_pool_name(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
/*
* zfs dataset property management
*/
_LIBZFS_H const char *zfs_prop_default_string(zfs_prop_t);
_LIBZFS_H uint64_t zfs_prop_default_numeric(zfs_prop_t);
_LIBZFS_H const char *zfs_prop_column_name(zfs_prop_t);
_LIBZFS_H boolean_t zfs_prop_align_right(zfs_prop_t);
_LIBZFS_H nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
nvlist_t *, uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t,
const char *);
_LIBZFS_H const char *zfs_prop_to_name(zfs_prop_t);
_LIBZFS_H int zfs_prop_set(zfs_handle_t *, const char *, const char *);
_LIBZFS_H int zfs_prop_set_list(zfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_prop_set_list_flags(zfs_handle_t *, nvlist_t *, int);
_LIBZFS_H int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
zprop_source_t *, char *, size_t, boolean_t);
_LIBZFS_H int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
boolean_t);
_LIBZFS_H int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
zprop_source_t *, char *, size_t);
_LIBZFS_H int zfs_prop_get_userquota_int(zfs_handle_t *zhp,
const char *propname, uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
char *buf, size_t len);
_LIBZFS_H uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, const char **);
_LIBZFS_H uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
_LIBZFS_H int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
_LIBZFS_H const char *zfs_prop_values(zfs_prop_t);
_LIBZFS_H int zfs_prop_is_string(zfs_prop_t prop);
_LIBZFS_H nvlist_t *zfs_get_all_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_user_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
_LIBZFS_H int zfs_wait_status(zfs_handle_t *, zfs_wait_activity_t,
boolean_t *, boolean_t *);
/*
* zfs encryption management
*/
_LIBZFS_H int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *,
char *);
_LIBZFS_H int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *,
nvlist_t *, boolean_t stdin_available, uint8_t **, uint_t *);
_LIBZFS_H int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *,
nvlist_t *);
_LIBZFS_H int zfs_crypto_attempt_load_keys(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_crypto_load_key(zfs_handle_t *, boolean_t, const char *);
_LIBZFS_H int zfs_crypto_unload_key(zfs_handle_t *);
_LIBZFS_H int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t);
typedef struct zprop_list {
int pl_prop;
char *pl_user_prop;
struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
size_t pl_recvd_width;
boolean_t pl_fixed;
} zprop_list_t;
_LIBZFS_H int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t,
boolean_t);
_LIBZFS_H void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
_LIBZFS_H int vdev_expand_proplist(zpool_handle_t *, const char *,
zprop_list_t **);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
#define ZFS_FEATURE_DISABLED "disabled"
#define ZFS_FEATURE_ENABLED "enabled"
#define ZFS_FEATURE_ACTIVE "active"
#define ZFS_UNSUPPORTED_INACTIVE "inactive"
#define ZFS_UNSUPPORTED_READONLY "readonly"
/*
* zpool property management
*/
_LIBZFS_H int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **,
zfs_type_t, boolean_t);
_LIBZFS_H int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
_LIBZFS_H const char *zpool_prop_default_string(zpool_prop_t);
_LIBZFS_H uint64_t zpool_prop_default_numeric(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_column_name(zpool_prop_t);
_LIBZFS_H boolean_t zpool_prop_align_right(zpool_prop_t);
/*
* Functions shared by zfs and zpool property management.
*/
_LIBZFS_H int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type);
_LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
_LIBZFS_H void zprop_free_list(zprop_list_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
GET_COL_NONE,
GET_COL_NAME,
GET_COL_PROPERTY,
GET_COL_VALUE,
GET_COL_RECVD,
GET_COL_SOURCE
} zfs_get_column_t;
/*
* Functions for printing zfs or zpool properties
*/
typedef struct vdev_cbdata {
int cb_name_flags;
char **cb_names;
unsigned int cb_names_count;
} vdev_cbdata_t;
typedef struct zprop_get_cbdata {
int cb_sources;
zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
int cb_colwidths[ZFS_GET_NCOLS + 1];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
vdev_cbdata_t cb_vdevs;
} zprop_get_cbdata_t;
#define ZFS_SET_NOMOUNT 1
typedef struct zprop_set_cbdata {
int cb_flags;
nvlist_t *cb_proplist;
} zprop_set_cbdata_t;
_LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
/*
* Iterator functions.
*/
#define ZFS_ITER_RECURSE (1 << 0)
#define ZFS_ITER_ARGS_CAN_BE_PATHS (1 << 1)
#define ZFS_ITER_PROP_LISTSNAPS (1 << 2)
#define ZFS_ITER_DEPTH_LIMIT (1 << 3)
#define ZFS_ITER_RECVD_PROPS (1 << 4)
#define ZFS_ITER_LITERAL_PROPS (1 << 5)
#define ZFS_ITER_SIMPLE (1 << 6)
typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents_v2(zfs_handle_t *, int, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots_v2(zfs_handle_t *, int, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted_v2(zfs_handle_t *, int, zfs_iter_f,
void *, uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec_v2(zfs_handle_t *, int, const char *,
zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_bookmarks_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *);
typedef struct get_all_cb {
zfs_handle_t **cb_handles;
size_t cb_alloc;
size_t cb_used;
} get_all_cb_t;
_LIBZFS_H void zfs_foreach_mountpoint(libzfs_handle_t *, zfs_handle_t **,
size_t, zfs_iter_f, void *, boolean_t);
_LIBZFS_H void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
/*
* Functions to create and destroy datasets.
*/
_LIBZFS_H int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
_LIBZFS_H int zfs_create_ancestors(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_destroy(zfs_handle_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl_os(libzfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t,
nvlist_t *);
_LIBZFS_H int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps,
nvlist_t *props);
_LIBZFS_H int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
typedef struct renameflags {
/* recursive rename */
unsigned int recursive : 1;
/* don't unmount file systems */
unsigned int nounmount : 1;
/* force unmount file systems */
unsigned int forceunmount : 1;
} renameflags_t;
_LIBZFS_H int zfs_rename(zfs_handle_t *, const char *, renameflags_t);
typedef struct sendflags {
/* Amount of extra information to print. */
int verbosity;
/* recursive send (ie, -R) */
boolean_t replicate;
/* for recursive send, skip sending missing snapshots */
boolean_t skipmissing;
/* for incrementals, do all intermediate snapshots */
boolean_t doall;
/* if dataset is a clone, do incremental from its origin */
boolean_t fromorigin;
/* field no longer used, maintained for backwards compatibility */
boolean_t pad;
/* send properties (ie, -p) */
boolean_t props;
/* do not send (no-op, ie. -n) */
boolean_t dryrun;
/* parsable verbose output (ie. -P) */
boolean_t parsable;
/* show progress (ie. -v) */
boolean_t progress;
/* show progress as process title (ie. -V) */
boolean_t progressastitle;
/* large blocks (>128K) are permitted */
boolean_t largeblock;
/* WRITE_EMBEDDED records of type DATA are permitted */
boolean_t embed_data;
/* compressed WRITE records are permitted */
boolean_t compress;
/* raw encrypted records are permitted */
boolean_t raw;
/* only send received properties (ie. -b) */
boolean_t backup;
/* include snapshot holds in send stream */
boolean_t holds;
/* stream represents a partially received dataset */
boolean_t saved;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_send(zfs_handle_t *, const char *, const char *,
sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
_LIBZFS_H int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t *,
const char *);
_LIBZFS_H int zfs_send_progress(zfs_handle_t *, int, uint64_t *, uint64_t *);
_LIBZFS_H int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd,
const char *);
_LIBZFS_H int zfs_send_saved(zfs_handle_t *, sendflags_t *, int, const char *);
_LIBZFS_H nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl,
const char *token);
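/*
 * A hedged sketch of zfs_send_one(): emit a raw, compressed, full stream
 * for the snapshot handle zhp to stdout (error handling omitted).
 *
 *	sendflags_t sf = { 0 };
 *	sf.compress = B_TRUE;
 *	sf.raw = B_TRUE;
 *	sf.verbosity = 1;
 *	(void) zfs_send_one(zhp, NULL, STDOUT_FILENO, &sf, NULL);
 */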
_LIBZFS_H int zfs_promote(zfs_handle_t *);
_LIBZFS_H int zfs_hold(zfs_handle_t *, const char *, const char *,
boolean_t, int);
_LIBZFS_H int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *);
_LIBZFS_H int zfs_release(zfs_handle_t *, const char *, const char *,
boolean_t);
_LIBZFS_H int zfs_get_holds(zfs_handle_t *, nvlist_t **);
_LIBZFS_H uint64_t zvol_volsize_to_reservation(zpool_handle_t *, uint64_t,
nvlist_t *);
typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
uid_t rid, uint64_t space);
_LIBZFS_H int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
zfs_userspace_cb_t, void *);
_LIBZFS_H int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
_LIBZFS_H int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
typedef struct recvflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* the destination is a prefix, not the exact fs (ie, -d) */
boolean_t isprefix;
/*
* Only the tail of the sent snapshot path is appended to the
* destination to determine the received snapshot name (ie, -e).
*/
boolean_t istail;
/* do not actually do the recv, just check if it would work (ie, -n) */
boolean_t dryrun;
/* rollback/destroy filesystems as necessary (eg, -F) */
boolean_t force;
/* set "canmount=off" on all modified filesystems */
boolean_t canmountoff;
/*
* Mark the file systems as "resumable" and do not destroy them if the
* receive is interrupted
*/
boolean_t resumable;
/* byteswap flag is used internally; callers need not specify */
boolean_t byteswap;
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
/* Was holds flag set in the compound header? */
boolean_t holds;
/* skip receive of snapshot holds */
boolean_t skipholds;
/* mount the filesystem unless nomount is specified */
boolean_t domount;
/* force unmount while recv snapshot (private) */
boolean_t forceunmount;
/* use this recv to check (and heal if needed) an existing snapshot */
boolean_t heal;
} recvflags_t;
_LIBZFS_H int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
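/*
 * Hedged example of zfs_receive(): a verbose, resumable receive from stdin
 * that rolls back the destination if needed, in the spirit of
 * "zfs recv -svF" ("tank/backup" is a placeholder dataset name).
 *
 *	recvflags_t rf = { 0 };
 *	rf.verbose = B_TRUE;
 *	rf.resumable = B_TRUE;
 *	rf.force = B_TRUE;
 *	(void) zfs_receive(hdl, "tank/backup", NULL, &rf, STDIN_FILENO, NULL);
 */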
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 1 << 0,
ZFS_DIFF_TIMESTAMP = 1 << 1,
ZFS_DIFF_CLASSIFY = 1 << 2,
ZFS_DIFF_NO_MANGLE = 1 << 3
} diff_flags_t;
_LIBZFS_H int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
int);
/*
* Miscellaneous functions.
*/
_LIBZFS_H const char *zfs_type_to_name(zfs_type_t);
_LIBZFS_H void zfs_refresh_properties(zfs_handle_t *);
_LIBZFS_H int zfs_name_valid(const char *, zfs_type_t);
_LIBZFS_H zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_parent_name(zfs_handle_t *, char *, size_t);
_LIBZFS_H boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_spa_version(zfs_handle_t *, int *);
_LIBZFS_H boolean_t zfs_bookmark_exists(const char *path);
/*
* Mount support functions.
*/
_LIBZFS_H boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
_LIBZFS_H boolean_t zfs_is_mounted(zfs_handle_t *, char **);
_LIBZFS_H int zfs_mount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_mount_at(zfs_handle_t *, const char *, int, const char *);
_LIBZFS_H int zfs_unmount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_unmountall(zfs_handle_t *, int);
_LIBZFS_H int zfs_mount_delegation_check(void);
#if defined(__linux__) || defined(__APPLE__)
_LIBZFS_H int zfs_parse_mount_options(const char *mntopts,
unsigned long *mntflags, unsigned long *zfsflags, int sloppy, char *badopt,
char *mtabopt);
_LIBZFS_H void zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt);
#endif
/*
* Share support functions.
*
* enum sa_protocol * lists are terminated with SA_NO_PROTOCOL,
* NULL means "all/any known to this libzfs".
*/
#define SA_NO_PROTOCOL -1
_LIBZFS_H boolean_t zfs_is_shared(zfs_handle_t *zhp, char **where,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_share(zfs_handle_t *zhp, const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshare(zfs_handle_t *zhp, const char *mountpoint,
const enum sa_protocol *proto);
_LIBZFS_H int zfs_unshareall(zfs_handle_t *zhp,
const enum sa_protocol *proto);
_LIBZFS_H void zfs_commit_shares(const enum sa_protocol *proto);
_LIBZFS_H void zfs_truncate_shares(const enum sa_protocol *proto);
_LIBZFS_H int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
/*
* Utility functions to run an external process.
*/
#define STDOUT_VERBOSE 0x01
#define STDERR_VERBOSE 0x02
#define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to lookup the command */
_LIBZFS_H int libzfs_run_process(const char *, char **, int);
_LIBZFS_H int libzfs_run_process_get_stdout(const char *, char *[], char *[],
char **[], int *);
_LIBZFS_H int libzfs_run_process_get_stdout_nopath(const char *, char *[],
char *[], char **[], int *);
_LIBZFS_H void libzfs_free_str_array(char **, int);
_LIBZFS_H boolean_t libzfs_envvar_is_set(const char *);
/*
* Utility functions for zfs version
*/
_LIBZFS_H const char *zfs_version_userland(void);
_LIBZFS_H char *zfs_version_kernel(void);
_LIBZFS_H int zfs_version_print(void);
/*
* Given a device or file, determine if it is part of a pool.
*/
_LIBZFS_H int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
boolean_t *);
/*
* Label manipulation.
*/
_LIBZFS_H int zpool_clear_label(int);
_LIBZFS_H int zpool_set_bootenv(zpool_handle_t *, const nvlist_t *);
_LIBZFS_H int zpool_get_bootenv(zpool_handle_t *, nvlist_t **);
/*
* Management interfaces for SMB ACL files
*/
_LIBZFS_H int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
_LIBZFS_H int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *,
char *);
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
*/
_LIBZFS_H int zpool_enable_datasets(zpool_handle_t *, const char *, int);
_LIBZFS_H int zpool_disable_datasets(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_datasets_os(zpool_handle_t *, boolean_t);
_LIBZFS_H void zpool_disable_volume_os(const char *);
/*
* Parse a features file for -o compatibility
*/
typedef enum {
ZPOOL_COMPATIBILITY_OK,
ZPOOL_COMPATIBILITY_WARNTOKEN,
ZPOOL_COMPATIBILITY_BADTOKEN,
ZPOOL_COMPATIBILITY_BADFILE,
ZPOOL_COMPATIBILITY_NOFILES
} zpool_compat_status_t;
_LIBZFS_H zpool_compat_status_t zpool_load_compat(const char *,
boolean_t *, char *, size_t);
#ifdef __FreeBSD__
/*
* Attach/detach the given filesystem to/from the given jail.
*/
_LIBZFS_H int zfs_jail(zfs_handle_t *zhp, int jailid, int attach);
/*
* Set loader options for next boot.
*/
_LIBZFS_H int zpool_nextboot(libzfs_handle_t *, uint64_t, uint64_t,
const char *);
#endif /* __FreeBSD__ */
#ifdef __linux__
/*
* Add or delete the given filesystem to/from the given user namespace.
*/
_LIBZFS_H int zfs_userns(zfs_handle_t *zhp, const char *nspath, int attach);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_H */
diff --git a/sys/contrib/openzfs/include/libzutil.h b/sys/contrib/openzfs/include/libzutil.h
index 053b1ed4b52a..839486fb62bf 100644
--- a/sys/contrib/openzfs/include/libzutil.h
+++ b/sys/contrib/openzfs/include/libzutil.h
@@ -1,215 +1,274 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 by Delphix. All rights reserved.
*/
#ifndef _LIBZUTIL_H
#define _LIBZUTIL_H extern __attribute__((visibility("default")))
#include <sys/nvpair.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Default wait time in milliseconds for a device name to be created.
*/
#define DISK_LABEL_WAIT (30 * 1000) /* 30 seconds */
/*
* Pool Config Operations
*
 * These are specific to the libzfs or libzpool library instance.
*/
typedef nvlist_t *refresh_config_func_t(void *, nvlist_t *);
typedef int pool_active_func_t(void *, const char *, uint64_t, boolean_t *);
typedef const struct pool_config_ops {
refresh_config_func_t *pco_refresh_config;
pool_active_func_t *pco_pool_active;
} pool_config_ops_t;
/*
* An instance of pool_config_ops_t is expected in the caller's binary.
*/
_LIBZUTIL_H pool_config_ops_t libzfs_config_ops;
_LIBZUTIL_H pool_config_ops_t libzpool_config_ops;
typedef enum lpc_error {
LPC_SUCCESS = 0, /* no error -- success */
 LPC_BADCACHE = 2000, /* invalid or missing cache file */
LPC_BADPATH, /* must be an absolute path */
LPC_NOMEM, /* out of memory */
LPC_EACCESS, /* some devices require root privileges */
LPC_UNKNOWN
} lpc_error_t;
typedef struct importargs {
char **path; /* a list of paths to search */
int paths; /* number of paths to search */
const char *poolname; /* name of a pool to find */
uint64_t guid; /* guid of a pool to find */
const char *cachefile; /* cachefile to use for import */
boolean_t can_be_active; /* can the pool be active? */
boolean_t scan; /* prefer scanning to libblkid cache */
nvlist_t *policy; /* load policy (max txg, rewind, etc.) */
} importargs_t;
typedef struct libpc_handle {
int lpc_error;
boolean_t lpc_printerr;
boolean_t lpc_open_access_error;
boolean_t lpc_desc_active;
char lpc_desc[1024];
pool_config_ops_t *lpc_ops;
void *lpc_lib_handle;
} libpc_handle_t;
_LIBZUTIL_H const char *libpc_error_description(libpc_handle_t *);
_LIBZUTIL_H nvlist_t *zpool_search_import(libpc_handle_t *, importargs_t *);
_LIBZUTIL_H int zpool_find_config(libpc_handle_t *, const char *, nvlist_t **,
importargs_t *);
_LIBZUTIL_H const char * const * zpool_default_search_paths(size_t *count);
_LIBZUTIL_H int zpool_read_label(int, nvlist_t **, int *);
_LIBZUTIL_H int zpool_label_disk_wait(const char *, int);
+_LIBZUTIL_H int zpool_disk_wait(const char *);
struct udev_device;
_LIBZUTIL_H int zfs_device_get_devid(struct udev_device *, char *, size_t);
_LIBZUTIL_H int zfs_device_get_physical(struct udev_device *, char *, size_t);
_LIBZUTIL_H void update_vdev_config_dev_strs(nvlist_t *);
/*
* Default device paths
*/
#define DISK_ROOT "/dev"
#define UDISK_ROOT "/dev/disk"
#define ZVOL_ROOT "/dev/zvol"
_LIBZUTIL_H int zfs_append_partition(char *path, size_t max_len);
_LIBZUTIL_H int zfs_resolve_shortname(const char *name, char *path,
size_t pathlen);
_LIBZUTIL_H char *zfs_strip_partition(const char *);
_LIBZUTIL_H const char *zfs_strip_path(const char *);
_LIBZUTIL_H int zfs_strcmp_pathname(const char *, const char *, int);
_LIBZUTIL_H boolean_t zfs_dev_is_dm(const char *);
_LIBZUTIL_H boolean_t zfs_dev_is_whole_disk(const char *);
_LIBZUTIL_H int zfs_dev_flush(int);
_LIBZUTIL_H char *zfs_get_underlying_path(const char *);
_LIBZUTIL_H char *zfs_get_enclosure_sysfs_path(const char *);
_LIBZUTIL_H boolean_t is_mpath_whole_disk(const char *);
_LIBZUTIL_H boolean_t zfs_isnumber(const char *);
/*
* Formats for iostat numbers. Examples: "12K", "30ms", "4B", "2321234", "-".
*
* ZFS_NICENUM_1024: Print kilo, mega, tera, peta, exa..
* ZFS_NICENUM_BYTES: Print single bytes ("13B"), kilo, mega, tera...
* ZFS_NICENUM_TIME: Print nanosecs, microsecs, millisecs, seconds...
* ZFS_NICENUM_RAW: Print the raw number without any formatting
* ZFS_NICENUM_RAWTIME: Same as RAW, but print dashes ('-') for zero.
*/
enum zfs_nicenum_format {
ZFS_NICENUM_1024 = 0,
ZFS_NICENUM_BYTES = 1,
ZFS_NICENUM_TIME = 2,
ZFS_NICENUM_RAW = 3,
ZFS_NICENUM_RAWTIME = 4
};
/*
* Convert a number to a human-readable form.
*/
_LIBZUTIL_H void zfs_nicebytes(uint64_t, char *, size_t);
_LIBZUTIL_H void zfs_nicenum(uint64_t, char *, size_t);
_LIBZUTIL_H void zfs_nicenum_format(uint64_t, char *, size_t,
enum zfs_nicenum_format);
_LIBZUTIL_H void zfs_nicetime(uint64_t, char *, size_t);
_LIBZUTIL_H void zfs_niceraw(uint64_t, char *, size_t);
#define nicenum(num, buf, size) zfs_nicenum(num, buf, size)
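/*
 * Example of the formats above (a sketch; exact rounding is up to the
 * implementation):
 *
 *	char buf[32];
 *	zfs_nicenum_format(12288, buf, sizeof (buf), ZFS_NICENUM_BYTES);
 *	// buf now holds "12K"
 */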
_LIBZUTIL_H void zpool_dump_ddt(const ddt_stat_t *, const ddt_histogram_t *);
_LIBZUTIL_H int zpool_history_unpack(char *, uint64_t, uint64_t *, nvlist_t ***,
uint_t *);
+_LIBZUTIL_H void fsleep(float sec);
+_LIBZUTIL_H int zpool_getenv_int(const char *env, int default_val);
struct zfs_cmd;
/*
* List of colors to use
*/
#define ANSI_BLACK "\033[0;30m"
#define ANSI_RED "\033[0;31m"
#define ANSI_GREEN "\033[0;32m"
#define ANSI_YELLOW "\033[0;33m"
#define ANSI_BLUE "\033[0;34m"
#define ANSI_BOLD_BLUE "\033[1;34m" /* light blue */
#define ANSI_MAGENTA "\033[0;35m"
#define ANSI_CYAN "\033[0;36m"
#define ANSI_GRAY "\033[0;37m"
#define ANSI_RESET "\033[0m"
#define ANSI_BOLD "\033[1m"
_LIBZUTIL_H int use_color(void);
_LIBZUTIL_H void color_start(const char *color);
_LIBZUTIL_H void color_end(void);
_LIBZUTIL_H int printf_color(const char *color, const char *format, ...);
_LIBZUTIL_H const char *zfs_basename(const char *path);
_LIBZUTIL_H ssize_t zfs_dirnamelen(const char *path);
#ifdef __linux__
extern char **environ;
_LIBZUTIL_H void zfs_setproctitle_init(int argc, char *argv[], char *envp[]);
_LIBZUTIL_H void zfs_setproctitle(const char *fmt, ...);
#else
#define zfs_setproctitle(fmt, ...) setproctitle(fmt, ##__VA_ARGS__)
#define zfs_setproctitle_init(x, y, z) ((void)0)
#endif
/*
* These functions are used by the ZFS libraries and cmd/zpool code, but are
* not exported in the ABI.
*/
typedef int (*pool_vdev_iter_f)(void *, nvlist_t *, void *);
int for_each_vdev_cb(void *zhp, nvlist_t *nv, pool_vdev_iter_f func,
void *data);
+int for_each_vdev_macro_helper_func(void *zhp_data, nvlist_t *nv, void *data);
+int for_each_real_leaf_vdev_macro_helper_func(void *zhp_data, nvlist_t *nv,
+ void *data);
+/*
+ * Often you'll want to iterate over all the vdevs in the pool, but don't want
+ * to use for_each_vdev() since it requires a callback function.
+ *
+ * Instead you can use FOR_EACH_VDEV():
+ *
+ * zpool_handle_t *zhp // Assume this is initialized
+ * nvlist_t *nv
+ * ...
+ * FOR_EACH_VDEV(zhp, nv) {
+ * const char *path = NULL;
+ * nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path);
+ * printf("Looking at vdev %s\n", path);
+ * }
+ *
+ * Note: FOR_EACH_VDEV runs in O(n^2) time where n = number of vdevs. However,
+ * there's an upper limit of 256 vdevs per dRAID top-level vdev (TLD), 255 for
+ * raidz2 TLDs, and a real-world limit of ~500 vdevs for mirrors, so this
+ * shouldn't really be an issue.
+ *
+ * Here are some micro-benchmarks of a complete FOR_EACH_VDEV loop on a RAID0
+ * pool:
+ *
+ * 100 vdevs = 0.7ms
+ * 500 vdevs = 17ms
+ * 750 vdevs = 40ms
+ * 1000 vdevs = 82ms
+ *
+ * The '__nv += 0' at the end of the for() loop gets around a "comma or
+ * semicolon followed by non-blank" checkstyle error. Note that on most
+ * compilers the '__nv += 0' can just be replaced with 'NULL', but gcc on
+ * CentOS 7 will give a 'warning: statement with no effect' error if you
+ * do that.
+ */
+#define __FOR_EACH_VDEV(__zhp, __nv, __func) { \
+ __nv = zpool_get_config(__zhp, NULL); \
+ VERIFY0(nvlist_lookup_nvlist(__nv, ZPOOL_CONFIG_VDEV_TREE, &__nv)); \
+ } \
+ for (nvlist_t *__root_nv = __nv, *__state = (nvlist_t *)0; \
+ for_each_vdev_cb(&__state, __root_nv, __func, &__nv) == 1; \
+ __nv += 0)
+
+#define FOR_EACH_VDEV(__zhp, __nv) \
+ __FOR_EACH_VDEV(__zhp, __nv, for_each_vdev_macro_helper_func)
+
+/*
+ * "real leaf" vdevs are leaf vdevs that are real devices (disks or files).
+ * This excludes leaf vdevs like draid spares.
+ */
+#define FOR_EACH_REAL_LEAF_VDEV(__zhp, __nv) \
+ __FOR_EACH_VDEV(__zhp, __nv, for_each_real_leaf_vdev_macro_helper_func)
+
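+/*
+ * For example, to visit only the disk/file leaves (a hedged sketch):
+ *
+ *	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
+ *		const char *path = NULL;
+ *		nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path);
+ *		printf("Real leaf vdev %s\n", path);
+ *	}
+ */
+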
int for_each_vdev_in_nvlist(nvlist_t *nvroot, pool_vdev_iter_f func,
void *data);
void update_vdevs_config_dev_sysfs_path(nvlist_t *config);
+_LIBZUTIL_H void update_vdev_config_dev_sysfs_path(nvlist_t *nv,
+ const char *path, const char *key);
#ifdef __cplusplus
}
#endif
#endif /* _LIBZUTIL_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
index 77ce75ca3f11..150e50380d89 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
@@ -1,131 +1,137 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SPL_MOD_H
#define _SPL_MOD_H
#include <sys/sysctl.h>
#define ZMOD_RW CTLFLAG_RWTUN
#define ZMOD_RD CTLFLAG_RDTUN
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc) \
SYSCTL_DECL(_vfs_ ## scope_prefix); \
SYSCTL_##type(_vfs_ ## scope_prefix, OID_AUTO, name, perm, \
&name_prefix ## name, 0, desc)
#define ZFS_MODULE_PARAM_ARGS SYSCTL_HANDLER_ARGS
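/*
 * For illustration (zfs_example_tunable is hypothetical), a declaration like
 *
 *	ZFS_MODULE_PARAM(zfs, zfs_, example_tunable, INT, ZMOD_RW,
 *	    "An example tunable");
 *
 * expands to SYSCTL_INT(_vfs_zfs, OID_AUTO, example_tunable, ...) backed by
 * zfs_example_tunable, i.e. the sysctl/tunable vfs.zfs.example_tunable.
 */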
#define ZFS_MODULE_PARAM_CALL_IMPL(parent, name, perm, args, desc) \
SYSCTL_DECL(parent); \
SYSCTL_PROC(parent, OID_AUTO, name, CTLFLAG_MPSAFE | perm | args, desc)
#define ZFS_MODULE_PARAM_CALL( \
scope_prefix, name_prefix, name, func, _, perm, desc) \
ZFS_MODULE_PARAM_CALL_IMPL(_vfs_ ## scope_prefix, name, perm, \
func ## _args(name_prefix ## name), desc)
#define ZFS_MODULE_VIRTUAL_PARAM_CALL ZFS_MODULE_PARAM_CALL
#define param_set_arc_u64_args(var) \
CTLTYPE_U64, &var, 0, param_set_arc_u64, "QU"
#define param_set_arc_int_args(var) \
CTLTYPE_INT, &var, 0, param_set_arc_int, "I"
#define param_set_arc_min_args(var) \
CTLTYPE_U64, NULL, 0, param_set_arc_min, "QU"
#define param_set_arc_max_args(var) \
CTLTYPE_U64, NULL, 0, param_set_arc_max, "QU"
#define param_set_arc_free_target_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_arc_free_target, "IU"
#define param_set_arc_no_grow_shift_args(var) \
CTLTYPE_INT, NULL, 0, param_set_arc_no_grow_shift, "I"
#define param_set_deadman_failmode_args(var) \
CTLTYPE_STRING, NULL, 0, param_set_deadman_failmode, "A"
#define param_set_deadman_synctime_args(var) \
CTLTYPE_U64, NULL, 0, param_set_deadman_synctime, "QU"
#define param_set_deadman_ziotime_args(var) \
CTLTYPE_U64, NULL, 0, param_set_deadman_ziotime, "QU"
#define param_set_multihost_interval_args(var) \
CTLTYPE_U64, NULL, 0, param_set_multihost_interval, "QU"
#define param_set_slop_shift_args(var) \
CTLTYPE_INT, NULL, 0, param_set_slop_shift, "I"
#define param_set_min_auto_ashift_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_min_auto_ashift, "IU"
#define param_set_max_auto_ashift_args(var) \
CTLTYPE_UINT, NULL, 0, param_set_max_auto_ashift, "IU"
+#define spa_taskq_read_param_set_args(var) \
+ CTLTYPE_STRING, NULL, 0, spa_taskq_read_param, "A"
+
+#define spa_taskq_write_param_set_args(var) \
+ CTLTYPE_STRING, NULL, 0, spa_taskq_write_param, "A"
+
#define fletcher_4_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, fletcher_4_param, "A"
#define blake3_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, blake3_param, "A"
#define sha256_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, sha256_param, "A"
#define sha512_param_set_args(var) \
CTLTYPE_STRING, NULL, 0, sha512_param, "A"
#include <sys/kernel.h>
#define module_init(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_init_early(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSINIT(zfs_ ## fn, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_exit(fn) \
static void \
wrap_ ## fn(void *dummy __unused) \
{ \
fn(); \
} \
SYSUNINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
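/*
 * Usage sketch (my_init is a hypothetical function): these wrappers mirror
 * the Linux module_init()/module_exit() convention on top of
 * SYSINIT/SYSUNINIT.
 *
 *	static void my_init(void) { ... }
 *	module_init(my_init);
 *
 * generates wrap_my_init() and registers it to run at SI_SUB_LAST.
 */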
#endif /* SPL_MOD_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
index b71f2f2e5625..b9d41903ea63 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
@@ -1,81 +1,81 @@
/*
* Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_UIO_H_
#define _OPENSOLARIS_SYS_UIO_H_
#ifndef _STANDALONE
#include_next <sys/uio.h>
#include <sys/_uio.h>
#include <sys/debug.h>
typedef struct iovec iovec_t;
typedef enum uio_seg zfs_uio_seg_t;
typedef enum uio_rw zfs_uio_rw_t;
typedef struct zfs_uio {
struct uio *uio;
} zfs_uio_t;
#define GET_UIO_STRUCT(u) (u)->uio
#define zfs_uio_segflg(u) GET_UIO_STRUCT(u)->uio_segflg
#define zfs_uio_offset(u) GET_UIO_STRUCT(u)->uio_offset
#define zfs_uio_resid(u) GET_UIO_STRUCT(u)->uio_resid
#define zfs_uio_iovcnt(u) GET_UIO_STRUCT(u)->uio_iovcnt
#define zfs_uio_iovlen(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(u, idx) GET_UIO_STRUCT(u)->uio_iov[(idx)].iov_base
#define zfs_uio_td(u) GET_UIO_STRUCT(u)->uio_td
#define zfs_uio_rw(u) GET_UIO_STRUCT(u)->uio_rw
#define zfs_uio_fault_disable(u, set)
#define zfs_uio_prefaultpages(size, u) (0)
static inline void
zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{
zfs_uio_offset(uio) = off;
}
static inline void
-zfs_uio_advance(zfs_uio_t *uio, size_t size)
+zfs_uio_advance(zfs_uio_t *uio, ssize_t size)
{
zfs_uio_resid(uio) -= size;
zfs_uio_offset(uio) += size;
}
static __inline void
zfs_uio_init(zfs_uio_t *uio, struct uio *uio_s)
{
GET_UIO_STRUCT(uio) = uio_s;
}
int zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio);
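/*
 * Hedged sketch: wrap a native struct uio once, then go through the
 * accessors above (uio and n are assumed to exist in the caller).
 *
 *	zfs_uio_t zuio;
 *	zfs_uio_init(&zuio, uio);
 *	if (zfs_uio_resid(&zuio) >= n)
 *		zfs_uio_advance(&zuio, n);
 */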
#endif /* !_STANDALONE */
#endif /* !_OPENSOLARIS_SYS_UIO_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
index 56a0ac96ac19..24bb03575f33 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vfsops_os.h
@@ -1,311 +1,310 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VFSOPS_H
#define _SYS_FS_ZFS_VFSOPS_H
#if __FreeBSD_version >= 1300125
#define TEARDOWN_RMS
#endif
#if __FreeBSD_version >= 1300109
#define TEARDOWN_INACTIVE_RMS
#endif
#include <sys/dataset_kstats.h>
#include <sys/list.h>
#include <sys/vfs.h>
#include <sys/zil.h>
#include <sys/sa.h>
#include <sys/rrwlock.h>
#ifdef TEARDOWN_INACTIVE_RMS
#include <sys/rmlock.h>
#endif
#include <sys/zfs_ioctl.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef TEARDOWN_RMS
typedef struct rmslock zfs_teardown_lock_t;
#else
#define zfs_teardown_lock_t rrmlock_t
#endif
#ifdef TEARDOWN_INACTIVE_RMS
typedef struct rmslock zfs_teardown_inactive_lock_t;
#else
#define zfs_teardown_inactive_lock_t krwlock_t
#endif
typedef struct zfsvfs zfsvfs_t;
struct znode;
struct zfsvfs {
vfs_t *z_vfs; /* generic fs struct */
zfsvfs_t *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
uint64_t z_max_blksz; /* maximum block size for files */
uint64_t z_fuid_obj; /* fuid table object number */
uint64_t z_fuid_size; /* fuid table size */
avl_tree_t z_fuid_idx; /* fuid tree keyed by index */
avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */
krwlock_t z_fuid_lock; /* fuid lock */
boolean_t z_fuid_loaded; /* fuid tables are loaded */
boolean_t z_fuid_dirty; /* need to sync fuid table? */
struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */
zilog_t *z_log; /* intent log pointer */
uint_t z_acl_type; /* type of acl usable on this fs */
uint_t z_acl_mode; /* acl chmod/mode behavior */
uint_t z_acl_inherit; /* acl inheritance behavior */
zfs_case_t z_case; /* case-sense */
boolean_t z_utf8; /* utf8-only */
int z_norm; /* normalization flags */
boolean_t z_atime; /* enable atimes mount option */
boolean_t z_unmounted; /* unmounted */
zfs_teardown_lock_t z_teardown_lock;
zfs_teardown_inactive_lock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all vnodes in the fs */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
struct zfsctl_root *z_ctldir; /* .zfs directory pointer */
boolean_t z_show_ctldir; /* expose .zfs in the root dir */
boolean_t z_issnap; /* true if this is a snapshot */
boolean_t z_use_fuids; /* version allows fuids */
boolean_t z_replay; /* set during ZIL replay */
boolean_t z_use_sa; /* version allows system attributes */
boolean_t z_xattr_sa; /* allow xattrs to be stored as SA */
boolean_t z_use_namecache; /* make use of FreeBSD name cache */
uint8_t z_xattr; /* xattr type in use */
uint64_t z_version; /* ZPL version */
uint64_t z_shares_dir; /* hidden shares dir */
dataset_kstats_t z_kstat; /* fs kstats */
kmutex_t z_lock;
uint64_t z_userquota_obj;
uint64_t z_groupquota_obj;
uint64_t z_userobjquota_obj;
uint64_t z_groupobjquota_obj;
uint64_t z_projectquota_obj;
uint64_t z_projectobjquota_obj;
uint64_t z_replay_eof; /* New end of file - replay only */
sa_attr_type_t *z_attr_table; /* SA attr mapping->id */
#define ZFS_OBJ_MTX_SZ 64
kmutex_t z_hold_mtx[ZFS_OBJ_MTX_SZ]; /* znode hold locks */
struct task z_unlinked_drain_task;
};
#ifdef TEARDOWN_RMS
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_lock, "zfs teardown")
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rms_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rms_rlock(&(zfsvfs)->z_teardown_lock);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rms_runlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rms_wlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rms_wunlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rms_unlock(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
rms_rowned(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
rms_wowned(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
rms_owned_any(&(zfsvfs)->z_teardown_lock)
#else
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#endif
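/*
 * Typical pattern (a sketch): hold the teardown lock for read around any
 * operation that must not race with unmount, e.g.:
 *
 *	ZFS_TEARDOWN_ENTER_READ(zfsvfs, FTAG);
 *	// ... operate on zfsvfs ...
 *	ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG);
 */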
#ifdef TEARDOWN_INACTIVE_RMS
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_inactive_lock, "zfs teardown inactive")
#define ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs) \
rms_destroy(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs) \
rms_try_rlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs) \
rms_rlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs) \
rms_runlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs) \
rms_wlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs) \
rms_wunlock(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
rms_wowned(&(zfsvfs)->z_teardown_inactive_lock)
#else
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rw_init(&(zfsvfs)->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL)
#define ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs) \
rw_destroy(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs) \
rw_tryenter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_READER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs) \
rw_enter(&(zfsvfs)->z_teardown_inactive_lock, RW_WRITER)
#define ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs) \
rw_exit(&(zfsvfs)->z_teardown_inactive_lock)
#define ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs) \
RW_WRITE_HELD(&(zfsvfs)->z_teardown_inactive_lock)
#endif
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*
* Normal filesystems (those not under .zfs/snapshot) have a total
* file ID size limited to 12 bytes (including the length field) due to
* NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical
* reasons, this same limit is being imposed by the Solaris NFSv3 implementation
* (although the NFSv3 protocol actually permits a maximum of 64 bytes). It
* is not possible to expand beyond 12 bytes without abandoning support
* of NFSv2.
*
* For normal filesystems, we partition up the available space as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
*
* We reserve only 48 bits for the object number, as this is the limit
* currently defined and imposed by the DMU.
*/
typedef struct zfid_short {
uint16_t zf_len;
uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_short_t;
/*
* Filesystems under .zfs/snapshot have a total file ID size of 22[*] bytes
* (including the length field). This makes files under .zfs/snapshot
* accessible by NFSv3 and NFSv4, but not NFSv2.
*
* For files under .zfs/snapshot, we partition up the available space
* as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
* 6 bytes objset id (48 bits)
* 4 bytes[**] currently just zero (32 bits)
*
* We reserve only 48 bits for the object number and objset id, as these are
* the limits currently defined and imposed by the DMU.
*
* [*] 20 bytes on FreeBSD to fit into the size of struct fid.
* [**] 2 bytes on FreeBSD for the above reason.
*/
typedef struct zfid_long {
zfid_short_t z_fid;
uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_setgen[2]; /* gen[i] = gen >> (8 * i) */
} zfid_long_t;
#define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t))
#define LONG_FID_LEN (sizeof (zfid_long_t) - sizeof (uint16_t))
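/*
 * The byte-packing convention noted above ("obj[i] = obj >> (8 * i)"),
 * written out as a sketch:
 *
 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
 *		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
 */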
extern uint_t zfs_fsyncer_key;
extern int zfs_super_owner;
-extern int zfs_bclone_enabled;
extern void zfs_init(void);
extern void zfs_fini(void);
extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_end_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
extern int zfsvfs_create(const char *name, boolean_t readonly, zfsvfs_t **zfvp);
extern int zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os);
extern void zfsvfs_free(zfsvfs_t *zfsvfs);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs);
extern int zfs_get_temporary_prop(struct dsl_dataset *ds, zfs_prop_t zfs_prop,
uint64_t *val, char *setpoint);
extern int zfs_busy(void);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_VFSOPS_H */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
index 1e35204932d1..ab1711b99f3f 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
@@ -1,100 +1,111 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
*/
#ifndef _ZFS_DCACHE_H
#define _ZFS_DCACHE_H
#include <linux/dcache.h>
#define dname(dentry) ((char *)((dentry)->d_name.name))
#define dlen(dentry) ((int)((dentry)->d_name.len))
#ifndef HAVE_D_MAKE_ROOT
#define d_make_root(inode) d_alloc_root(inode)
#endif /* HAVE_D_MAKE_ROOT */
#ifdef HAVE_DENTRY_D_U_ALIASES
#define d_alias d_u.d_alias
#endif
/*
* Starting from Linux 5.13, flush_dcache_page() becomes an inline function
 * and under some configurations, may indirectly reference GPL-only
- * cpu_feature_keys on powerpc. Override this function when it is detected
- * being GPL-only.
+ * symbols, e.g., cpu_feature_keys on powerpc and PageHuge on riscv.
+ * Override this function when it is detected to be GPL-only.
*/
#if defined __powerpc__ && defined HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY
#include <linux/simd_powerpc.h>
#define flush_dcache_page(page) do { \
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && \
test_bit(PG_dcache_clean, &(page)->flags)) \
clear_bit(PG_dcache_clean, &(page)->flags); \
} while (0)
#endif
+/*
+ * On riscv, the PageHuge() check can be safely dropped: it only matters
+ * for pages allocated by HugeTLB, while flush_dcache_page() in the zfs
+ * module is only called on kernel pages.
+ */
+#if defined __riscv && defined HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY
+#define flush_dcache_page(page) do { \
+ if (test_bit(PG_dcache_clean, &(page)->flags)) \
+ clear_bit(PG_dcache_clean, &(page)->flags); \
+ } while (0)
+#endif
/*
* 2.6.30 API change,
* The const keyword was added to the 'struct dentry_operations' in
* the dentry structure. To handle this we define an appropriate
* dentry_operations_t typedef which can be used.
*/
typedef const struct dentry_operations dentry_operations_t;
/*
* 2.6.38 API addition,
* Added d_clear_d_op() helper function which clears some flags and the
* registered dentry->d_op table. This is required because d_set_d_op()
* issues a warning when the dentry operations table is already set.
* For the .zfs control directory to work properly we must be able to
* override the default operations table and register custom .d_automount
* and .d_revalidate callbacks.
*/
static inline void
d_clear_d_op(struct dentry *dentry)
{
dentry->d_op = NULL;
dentry->d_flags &= ~(
DCACHE_OP_HASH | DCACHE_OP_COMPARE |
DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE);
}
/*
* Walk and invalidate all dentry aliases of an inode
* unless it's a mountpoint
*/
static inline void
zpl_d_drop_aliases(struct inode *inode)
{
struct dentry *dentry;
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (!IS_ROOT(dentry) && !d_mountpoint(dentry) &&
(dentry->d_inode == inode)) {
d_drop(dentry);
}
}
spin_unlock(&inode->i_lock);
}
#endif /* _ZFS_DCACHE_H */
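As a hedged illustration of the d_clear_d_op() requirement described in the comment above: a module that wants its own .d_automount/.d_revalidate callbacks first clears the inherited operations table so the kernel's d_set_d_op() does not warn, then installs its own table. All my_* names below are hypothetical, not the actual .zfs control directory code.

static struct vfsmount *
my_automount(struct path *path)
{
	return (NULL);		/* hypothetical: nothing to mount */
}

static int
my_revalidate(struct dentry *dentry, unsigned int flags)
{
	return (1);		/* hypothetical: dentry always valid */
}

static const struct dentry_operations my_ctldir_dops = {
	.d_automount	= my_automount,
	.d_revalidate	= my_revalidate,
};

static void
my_install_ctldir_dops(struct dentry *dentry)
{
	d_clear_d_op(dentry);	/* avoid the d_set_d_op() warning */
	d_set_d_op(dentry, &my_ctldir_dops);
}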
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_aarch64.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_aarch64.h
index 16276b08c759..123a0c72bc6a 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_aarch64.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_aarch64.h
@@ -1,116 +1,122 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Romain Dolbeau <romain@dolbeau.org>.
* Copyright (C) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
* Copyright (C) 2022 Sebastian Gottschall <s.gottschall@dd-wrt.com>
*/
/*
* USER API:
*
* Kernel fpu methods:
* kfpu_allowed()
* kfpu_begin()
* kfpu_end()
* kfpu_init()
* kfpu_fini()
*
* SIMD support:
*
 * The following functions should be called to determine whether a CPU
 * feature is supported. All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set,
 * all relevant feature test functions should be called.
*
* Supported features:
* zfs_neon_available()
* zfs_sha256_available()
* zfs_sha512_available()
*/
#ifndef _LINUX_SIMD_AARCH64_H
#define _LINUX_SIMD_AARCH64_H
#include <sys/types.h>
#include <asm/neon.h>
#include <asm/elf.h>
#include <asm/hwcap.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
#include <asm/sysreg.h>
#else
#define sys_reg(op0, op1, crn, crm, op2) ( \
((op0) << Op0_shift) | \
((op1) << Op1_shift) | \
((crn) << CRn_shift) | \
((crm) << CRm_shift) | \
((op2) << Op2_shift))
#endif
#define ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
#define ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
+#if (defined(HAVE_KERNEL_NEON) && defined(CONFIG_KERNEL_MODE_NEON))
#define kfpu_allowed() 1
#define kfpu_begin() kernel_neon_begin()
#define kfpu_end() kernel_neon_end()
+#else
+#define kfpu_allowed() 0
+#define kfpu_begin() do {} while (0)
+#define kfpu_end() do {} while (0)
+#endif
#define kfpu_init() (0)
#define kfpu_fini() do {} while (0)
#define get_ftr(id) { \
unsigned long __val; \
asm("mrs %0, "#id : "=r" (__val)); \
__val; \
}
/*
* Check if NEON is available
*/
static inline boolean_t
zfs_neon_available(void)
{
unsigned long ftr = ((get_ftr(ID_AA64PFR0_EL1)) >> 16) & 0xf;
return (ftr == 0 || ftr == 1);
}
/*
* Check if SHA256 is available
*/
static inline boolean_t
zfs_sha256_available(void)
{
unsigned long ftr = ((get_ftr(ID_AA64ISAR0_EL1)) >> 12) & 0x3;
return (ftr & 0x1);
}
/*
* Check if SHA512 is available
*/
static inline boolean_t
zfs_sha512_available(void)
{
unsigned long ftr = ((get_ftr(ID_AA64ISAR0_EL1)) >> 12) & 0x3;
return (ftr & 0x2);
}
#endif /* _LINUX_SIMD_AARCH64_H */
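A hedged usage sketch of the kfpu and feature-test API documented above (the same pattern applies to the 32-bit ARM header that follows): check kfpu_allowed() plus the relevant feature test before entering the SIMD path, and bracket that path with kfpu_begin()/kfpu_end(). The my_* functions are hypothetical.

extern void my_sha256_neon(const uint8_t *, size_t);	/* hypothetical */
extern void my_sha256_generic(const uint8_t *, size_t);	/* hypothetical */

static void
my_sha256(const uint8_t *buf, size_t len)
{
	if (kfpu_allowed() && zfs_sha256_available()) {
		kfpu_begin();		/* enter kernel NEON context */
		my_sha256_neon(buf, len);
		kfpu_end();
	} else {
		my_sha256_generic(buf, len);	/* scalar fallback */
	}
}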
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_arm.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_arm.h
index c432a6d4abd1..bc70eaef3073 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_arm.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_arm.h
@@ -1,80 +1,86 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
/*
* USER API:
*
* Kernel fpu methods:
* kfpu_allowed()
* kfpu_begin()
* kfpu_end()
* kfpu_init()
* kfpu_fini()
*
* SIMD support:
*
 * The following functions should be called to determine whether a CPU
 * feature is supported. All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set,
 * all relevant feature test functions should be called.
*
* Supported features:
* zfs_neon_available()
* zfs_sha256_available()
*/
#ifndef _LINUX_SIMD_ARM_H
#define _LINUX_SIMD_ARM_H
#include <sys/types.h>
#include <asm/neon.h>
#include <asm/elf.h>
#include <asm/hwcap.h>
+#if (defined(HAVE_KERNEL_NEON) && defined(CONFIG_KERNEL_MODE_NEON))
#define kfpu_allowed() 1
#define kfpu_begin() kernel_neon_begin()
#define kfpu_end() kernel_neon_end()
+#else
+#define kfpu_allowed() 0
+#define kfpu_begin() do {} while (0)
+#define kfpu_end() do {} while (0)
+#endif
#define kfpu_init() (0)
#define kfpu_fini() do {} while (0)
/*
* Check if NEON is available
*/
static inline boolean_t
zfs_neon_available(void)
{
return (elf_hwcap & HWCAP_NEON);
}
/*
* Check if SHA256 is available
*/
static inline boolean_t
zfs_sha256_available(void)
{
return (elf_hwcap2 & HWCAP2_SHA2);
}
#endif /* _LINUX_SIMD_ARM_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/cred.h b/sys/contrib/openzfs/include/os/linux/spl/sys/cred.h
index 7fd5f644863f..c19c3c0719ff 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/cred.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/cred.h
@@ -1,141 +1,186 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_CRED_H
#define _SPL_CRED_H
#include <linux/module.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/vfs.h>
typedef struct cred cred_t;
extern struct task_struct init_task;
#define kcred ((cred_t *)(init_task.cred))
#define CRED() ((cred_t *)current_cred())
/* Linux 4.9 API change, GROUP_AT was removed */
#ifndef GROUP_AT
#define GROUP_AT(gi, i) ((gi)->gid[i])
#endif
#define KUID_TO_SUID(x) (__kuid_val(x))
#define KGID_TO_SGID(x) (__kgid_val(x))
#define SUID_TO_KUID(x) (KUIDT_INIT(x))
#define SGID_TO_KGID(x) (KGIDT_INIT(x))
#define KGIDP_TO_SGIDP(x) (&(x)->val)
extern zidmap_t *zfs_get_init_idmap(void);
/* Check if the user ns is the initial one */
static inline boolean_t
zfs_is_init_userns(struct user_namespace *user_ns)
{
#if defined(CONFIG_USER_NS)
return (user_ns == kcred->user_ns);
#else
return (B_FALSE);
#endif
}
static inline struct user_namespace *zfs_i_user_ns(struct inode *inode)
{
#ifdef HAVE_SUPER_USER_NS
return (inode->i_sb->s_user_ns);
#else
return (kcred->user_ns);
#endif
}
static inline boolean_t zfs_no_idmapping(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns)
{
- return (zfs_is_init_userns(mnt_userns) || mnt_userns == fs_userns);
+ return (zfs_is_init_userns(mnt_userns) ||
+ mnt_userns == fs_userns);
}
static inline uid_t zfs_uid_to_vfsuid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, uid_t uid)
{
- struct user_namespace *owner = idmap_owner(mnt_userns);
+ struct user_namespace *owner;
+#ifdef HAVE_IOPS_CREATE_IDMAP
+ if (mnt_userns == zfs_init_idmap)
+ return (uid);
+#endif
+#ifdef HAVE_IDMAP_NO_USERNS
+ struct user_namespace ns;
+ ns.uid_map = mnt_userns->uid_map;
+ owner = &ns;
+#else
+ owner = idmap_owner(mnt_userns);
+#endif
if (zfs_no_idmapping(owner, fs_userns))
return (uid);
if (!zfs_is_init_userns(fs_userns))
uid = from_kuid(fs_userns, KUIDT_INIT(uid));
if (uid == (uid_t)-1)
return (uid);
return (__kuid_val(make_kuid(owner, uid)));
}
static inline gid_t zfs_gid_to_vfsgid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, gid_t gid)
{
- struct user_namespace *owner = idmap_owner(mnt_userns);
+ struct user_namespace *owner;
+#ifdef HAVE_IOPS_CREATE_IDMAP
+ if (mnt_userns == zfs_init_idmap)
+ return (gid);
+#endif
+#ifdef HAVE_IDMAP_NO_USERNS
+ struct user_namespace ns;
+ ns.gid_map = mnt_userns->gid_map;
+ owner = &ns;
+#else
+ owner = idmap_owner(mnt_userns);
+#endif
if (zfs_no_idmapping(owner, fs_userns))
return (gid);
if (!zfs_is_init_userns(fs_userns))
gid = from_kgid(fs_userns, KGIDT_INIT(gid));
if (gid == (gid_t)-1)
return (gid);
return (__kgid_val(make_kgid(owner, gid)));
}
static inline uid_t zfs_vfsuid_to_uid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, uid_t uid)
{
- struct user_namespace *owner = idmap_owner(mnt_userns);
+ struct user_namespace *owner;
+#ifdef HAVE_IOPS_CREATE_IDMAP
+ if (mnt_userns == zfs_init_idmap)
+ return (uid);
+#endif
+#ifdef HAVE_IDMAP_NO_USERNS
+ struct user_namespace ns;
+ ns.uid_map = mnt_userns->uid_map;
+ owner = &ns;
+#else
+ owner = idmap_owner(mnt_userns);
+#endif
if (zfs_no_idmapping(owner, fs_userns))
return (uid);
uid = from_kuid(owner, KUIDT_INIT(uid));
if (uid == (uid_t)-1)
return (uid);
if (zfs_is_init_userns(fs_userns))
return (uid);
return (__kuid_val(make_kuid(fs_userns, uid)));
}
static inline gid_t zfs_vfsgid_to_gid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, gid_t gid)
{
- struct user_namespace *owner = idmap_owner(mnt_userns);
+ struct user_namespace *owner;
+#ifdef HAVE_IOPS_CREATE_IDMAP
+ if (mnt_userns == zfs_init_idmap)
+ return (gid);
+#endif
+#ifdef HAVE_IDMAP_NO_USERNS
+ struct user_namespace ns;
+ ns.gid_map = mnt_userns->gid_map;
+ owner = &ns;
+#else
+ owner = idmap_owner(mnt_userns);
+#endif
if (zfs_no_idmapping(owner, fs_userns))
return (gid);
gid = from_kgid(owner, KGIDT_INIT(gid));
if (gid == (gid_t)-1)
return (gid);
if (zfs_is_init_userns(fs_userns))
return (gid);
return (__kgid_val(make_kgid(fs_userns, gid)));
}
extern void crhold(cred_t *cr);
extern void crfree(cred_t *cr);
extern uid_t crgetuid(const cred_t *cr);
extern uid_t crgetruid(const cred_t *cr);
extern gid_t crgetgid(const cred_t *cr);
extern int crgetngroups(const cred_t *cr);
extern gid_t *crgetgroups(const cred_t *cr);
extern int groupmember(gid_t gid, const cred_t *cr);
#endif /* _SPL_CRED_H */
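A hedged round-trip sketch of the idmapping helpers above: map an on-disk uid to the vfs-visible uid for an idmapped mount and back, propagating the (uid_t)-1 "unmapped" convention the helpers use. The wrapper itself is illustrative.

static uid_t
my_uid_roundtrip(zidmap_t *mnt_ns, struct user_namespace *fs_ns, uid_t uid)
{
	uid_t vfsuid = zfs_uid_to_vfsuid(mnt_ns, fs_ns, uid);

	if (vfsuid == (uid_t)-1)
		return (vfsuid);	/* id has no mapping */
	return (zfs_vfsuid_to_uid(mnt_ns, fs_ns, vfsuid));
}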
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
index 82d50b6034c4..b159bb52d111 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem_cache.h
@@ -1,220 +1,218 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_KMEM_CACHE_H
#define _SPL_KMEM_CACHE_H
#include <sys/taskq.h>
/*
* Slab allocation interfaces. The SPL slab differs from the standard
* Linux SLAB or SLUB primarily in that each cache may be backed by slabs
* allocated from the physical or virtual memory address space. The virtual
 * slabs allow for good behavior when allocating large objects of identical
* size. This slab implementation also supports both constructors and
* destructors which the Linux slab does not.
*/
typedef enum kmc_bit {
KMC_BIT_NODEBUG = 1, /* Default behavior */
KMC_BIT_KVMEM = 7, /* Use kvmalloc linux allocator */
KMC_BIT_SLAB = 8, /* Use Linux slab cache */
KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */
KMC_BIT_GROWING = 15, /* Growing in progress */
KMC_BIT_REAPING = 16, /* Reaping in progress */
KMC_BIT_DESTROY = 17, /* Destroy in progress */
KMC_BIT_TOTAL = 18, /* Proc handler helper bit */
KMC_BIT_ALLOC = 19, /* Proc handler helper bit */
KMC_BIT_MAX = 20, /* Proc handler helper bit */
} kmc_bit_t;
/* kmem move callback return values */
typedef enum kmem_cbrc {
KMEM_CBRC_YES = 0, /* Object moved */
KMEM_CBRC_NO = 1, /* Object not moved */
KMEM_CBRC_LATER = 2, /* Object not moved, try again later */
KMEM_CBRC_DONT_NEED = 3, /* Neither object is needed */
KMEM_CBRC_DONT_KNOW = 4, /* Object unknown */
} kmem_cbrc_t;
#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG)
#define KMC_KVMEM (1 << KMC_BIT_KVMEM)
#define KMC_SLAB (1 << KMC_BIT_SLAB)
#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED)
#define KMC_GROWING (1 << KMC_BIT_GROWING)
#define KMC_REAPING (1 << KMC_BIT_REAPING)
#define KMC_DESTROY (1 << KMC_BIT_DESTROY)
#define KMC_TOTAL (1 << KMC_BIT_TOTAL)
#define KMC_ALLOC (1 << KMC_BIT_ALLOC)
#define KMC_MAX (1 << KMC_BIT_MAX)
#define KMC_REAP_CHUNK INT_MAX
#define KMC_DEFAULT_SEEKS 1
-#define KMC_RECLAIM_ONCE 0x1 /* Force a single shrinker pass */
-
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;
#define SKM_MAGIC 0x2e2e2e2e
#define SKO_MAGIC 0x20202020
#define SKS_MAGIC 0x22222222
#define SKC_MAGIC 0x2c2c2c2c
#define SPL_KMEM_CACHE_OBJ_PER_SLAB 8 /* Target objects per slab */
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
#ifdef _LP64
#define SPL_KMEM_CACHE_MAX_SIZE 32 /* Max slab size in MB */
#else
#define SPL_KMEM_CACHE_MAX_SIZE 4 /* Max slab size in MB */
#endif
#define SPL_MAX_ORDER (MAX_ORDER - 3)
#define SPL_MAX_ORDER_NR_PAGES (1 << (SPL_MAX_ORDER - 1))
#ifdef CONFIG_SLUB
#define SPL_MAX_KMEM_CACHE_ORDER PAGE_ALLOC_COSTLY_ORDER
#define SPL_MAX_KMEM_ORDER_NR_PAGES (1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define SPL_MAX_KMEM_ORDER_NR_PAGES (KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef struct spl_kmem_magazine {
uint32_t skm_magic; /* Sanity magic */
uint32_t skm_avail; /* Available objects */
uint32_t skm_size; /* Magazine size */
uint32_t skm_refill; /* Batch refill size */
struct spl_kmem_cache *skm_cache; /* Owned by cache */
unsigned int skm_cpu; /* Owned by cpu */
void *skm_objs[]; /* Object pointers */
} spl_kmem_magazine_t;
typedef struct spl_kmem_obj {
uint32_t sko_magic; /* Sanity magic */
void *sko_addr; /* Buffer address */
struct spl_kmem_slab *sko_slab; /* Owned by slab */
struct list_head sko_list; /* Free object list linkage */
} spl_kmem_obj_t;
typedef struct spl_kmem_slab {
uint32_t sks_magic; /* Sanity magic */
uint32_t sks_objs; /* Objects per slab */
struct spl_kmem_cache *sks_cache; /* Owned by cache */
struct list_head sks_list; /* Slab list linkage */
struct list_head sks_free_list; /* Free object list */
unsigned long sks_age; /* Last modify jiffie */
uint32_t sks_ref; /* Ref count used objects */
} spl_kmem_slab_t;
typedef struct spl_kmem_alloc {
struct spl_kmem_cache *ska_cache; /* Owned by cache */
int ska_flags; /* Allocation flags */
taskq_ent_t ska_tqe; /* Task queue entry */
} spl_kmem_alloc_t;
typedef struct spl_kmem_emergency {
struct rb_node ske_node; /* Emergency tree linkage */
unsigned long ske_obj; /* Buffer address */
} spl_kmem_emergency_t;
typedef struct spl_kmem_cache {
uint32_t skc_magic; /* Sanity magic */
uint32_t skc_name_size; /* Name length */
char *skc_name; /* Name string */
spl_kmem_magazine_t **skc_mag; /* Per-CPU warm cache */
uint32_t skc_mag_size; /* Magazine size */
uint32_t skc_mag_refill; /* Magazine refill count */
spl_kmem_ctor_t skc_ctor; /* Constructor */
spl_kmem_dtor_t skc_dtor; /* Destructor */
void *skc_private; /* Private data */
void *skc_vmp; /* Unused */
struct kmem_cache *skc_linux_cache; /* Linux slab cache if used */
unsigned long skc_flags; /* Flags */
uint32_t skc_obj_size; /* Object size */
uint32_t skc_obj_align; /* Object alignment */
uint32_t skc_slab_objs; /* Objects per slab */
uint32_t skc_slab_size; /* Slab size */
atomic_t skc_ref; /* Ref count callers */
taskqid_t skc_taskqid; /* Slab reclaim task */
struct list_head skc_list; /* List of caches linkage */
struct list_head skc_complete_list; /* Completely alloc'ed */
struct list_head skc_partial_list; /* Partially alloc'ed */
struct rb_root skc_emergency_tree; /* Min sized objects */
spinlock_t skc_lock; /* Cache lock */
spl_wait_queue_head_t skc_waitq; /* Allocation waiters */
uint64_t skc_slab_fail; /* Slab alloc failures */
uint64_t skc_slab_create; /* Slab creates */
uint64_t skc_slab_destroy; /* Slab destroys */
uint64_t skc_slab_total; /* Slab total current */
uint64_t skc_slab_alloc; /* Slab alloc current */
uint64_t skc_slab_max; /* Slab max historic */
uint64_t skc_obj_total; /* Obj total current */
uint64_t skc_obj_alloc; /* Obj alloc current */
struct percpu_counter skc_linux_alloc; /* Linux-backed Obj alloc */
uint64_t skc_obj_max; /* Obj max historic */
uint64_t skc_obj_deadlock; /* Obj emergency deadlocks */
uint64_t skc_obj_emergency; /* Obj emergency current */
uint64_t skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;
#define kmem_cache_t spl_kmem_cache_t
extern spl_kmem_cache_t *spl_kmem_cache_create(const char *name, size_t size,
size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
void *reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
/*
 * This is necessary to be compatible with other kernel modules
 * or in-tree filesystems that may define kmem_cache_alloc,
 * as bcachefs now does.
*/
#ifdef kmem_cache_alloc
#undef kmem_cache_alloc
#endif
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()
/*
* The following functions are only available for internal use.
*/
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);
#endif /* _SPL_KMEM_CACHE_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/shrinker.h b/sys/contrib/openzfs/include/os/linux/spl/sys/shrinker.h
index d472754be4f4..bca4c850694a 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/shrinker.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/shrinker.h
@@ -1,113 +1,85 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_SHRINKER_H
#define _SPL_SHRINKER_H
#include <linux/mm.h>
#include <linux/fs.h>
/*
* Due to frequent changes in the shrinker API the following
- * compatibility wrappers should be used. They are as follows:
+ * compatibility wrapper should be used.
*
- * SPL_SHRINKER_DECLARE(varname, countfunc, scanfunc, seek_cost);
+ * shrinker = spl_register_shrinker(name, countfunc, scanfunc, seek_cost);
+ * spl_unregister_shrinker(shrinker);
*
- * SPL_SHRINKER_DECLARE is used to declare a shrinker with the name varname,
- * which is passed to spl_register_shrinker()/spl_unregister_shrinker().
+ * spl_register_shrinker is used to create and register a shrinker with the
+ * given name.
* The countfunc returns the number of free-able objects.
* The scanfunc returns the number of objects that were freed.
* The callbacks can return SHRINK_STOP if further calls can't make any more
* progress. Note that a return value of SHRINK_EMPTY is currently not
* supported.
*
* Example:
*
* static unsigned long
* my_count(struct shrinker *shrink, struct shrink_control *sc)
* {
* ...calculate number of objects in the cache...
*
* return (number of objects in the cache);
* }
*
* static unsigned long
* my_scan(struct shrinker *shrink, struct shrink_control *sc)
* {
* ...scan objects in the cache and reclaim them...
* }
*
- * SPL_SHRINKER_DECLARE(my_shrinker, my_count, my_scan, DEFAULT_SEEKS);
+ * static struct shrinker *my_shrinker;
*
* void my_init_func(void) {
- * spl_register_shrinker(&my_shrinker);
+ * my_shrinker = spl_register_shrinker("my-shrinker",
+ * my_count, my_scan, DEFAULT_SEEKS);
+ * }
+ *
+ * void my_fini_func(void) {
+ * spl_unregister_shrinker(my_shrinker);
* }
*/
-#ifdef HAVE_REGISTER_SHRINKER_VARARG
-#define spl_register_shrinker(x) register_shrinker(x, "zfs-arc-shrinker")
-#else
-#define spl_register_shrinker(x) register_shrinker(x)
-#endif
-#define spl_unregister_shrinker(x) unregister_shrinker(x)
+typedef unsigned long (*spl_shrinker_cb)
+ (struct shrinker *, struct shrink_control *);
-/*
- * Linux 3.0 to 3.11 Shrinker API Compatibility.
- */
-#if defined(HAVE_SINGLE_SHRINKER_CALLBACK)
-#define SPL_SHRINKER_DECLARE(varname, countfunc, scanfunc, seek_cost) \
-static int \
-__ ## varname ## _wrapper(struct shrinker *shrink, struct shrink_control *sc)\
-{ \
- if (sc->nr_to_scan != 0) { \
- (void) scanfunc(shrink, sc); \
- } \
- return (countfunc(shrink, sc)); \
-} \
- \
-static struct shrinker varname = { \
- .shrink = __ ## varname ## _wrapper, \
- .seeks = seek_cost, \
-}
+struct shrinker *spl_register_shrinker(const char *name,
+ spl_shrinker_cb countfunc, spl_shrinker_cb scanfunc, int seek_cost);
+void spl_unregister_shrinker(struct shrinker *);
+#ifndef SHRINK_STOP
+/* 3.0-3.11 compatibility */
#define SHRINK_STOP (-1)
-
-/*
- * Linux 3.12 and later Shrinker API Compatibility.
- */
-#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
-#define SPL_SHRINKER_DECLARE(varname, countfunc, scanfunc, seek_cost) \
-static struct shrinker varname = { \
- .count_objects = countfunc, \
- .scan_objects = scanfunc, \
- .seeks = seek_cost, \
-}
-
-#else
-/*
- * Linux 2.x to 2.6.22, or a newer shrinker API has been introduced.
- */
-#error "Unknown shrinker callback"
#endif
#endif /* SPL_SHRINKER_H */
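A minimal sketch of how the new spl_register_shrinker() interface could be backed on kernels that provide shrinker_alloc()/shrinker_register() (Linux 6.7 and later). This is an assumption for illustration only; the real SPL implementation must also cover the older register_shrinker() variants.

struct shrinker *
my_spl_register_shrinker(const char *name, spl_shrinker_cb countfunc,
    spl_shrinker_cb scanfunc, int seek_cost)
{
	/* shrinker_alloc()/shrinker_register() exist on Linux 6.7+ */
	struct shrinker *shrinker = shrinker_alloc(0, "%s", name);

	if (shrinker == NULL)
		return (NULL);

	shrinker->count_objects = countfunc;
	shrinker->scan_objects = scanfunc;
	shrinker->seeks = seek_cost;
	shrinker_register(shrinker);

	return (shrinker);
}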
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/string.h b/sys/contrib/openzfs/include/os/linux/spl/sys/string.h
index 38134dcf4c76..f44bf23eb326 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/string.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/string.h
@@ -1 +1,50 @@
+/*
+ * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
+ * UCRL-CODE-235197
+ *
+ * This file is part of the SPL, Solaris Porting Layer.
+ *
+ * The SPL is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The SPL is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with the SPL. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SPL_STRING_H
+#define _SPL_STRING_H
+
#include <linux/string.h>
+
+/* Fallbacks for kernel missing strlcpy */
+#ifndef HAVE_KERNEL_STRLCPY
+
+#if defined(HAVE_KERNEL_STRSCPY)
+/*
+ * strscpy() behaves like strlcpy(), but returns an error on truncation.
+ * strlcpy() is defined to return strlen(src), so detect the error and
+ * return strlen(src) instead.
+ */
+static inline size_t
+strlcpy(char *dest, const char *src, size_t size)
+{
+ ssize_t ret = strscpy(dest, src, size);
+ if (likely(ret > 0))
+ return ((size_t)ret);
+ return (strlen(src));
+}
+#else
+#error "no strlcpy fallback available"
+#endif
+
+#endif /* HAVE_KERNEL_STRLCPY */
+
+#endif /* _SPL_STRING_H */
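A usage sketch for the fallback above: because strlcpy() returns strlen(src), truncation is detected by comparing the return value against the destination size. The helper name is illustrative.

static inline boolean_t
my_copy_name(char *dst, size_t dstlen, const char *src)
{
	/* B_TRUE if src fit, B_FALSE if it was truncated */
	return (strlcpy(dst, src, dstlen) < dstlen ? B_TRUE : B_FALSE);
}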
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/types.h b/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
index d89a91c36f92..20ba457f7efe 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/types.h
@@ -1,73 +1,84 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_TYPES_H
#define _SPL_TYPES_H
#include <linux/types.h>
typedef enum {
B_FALSE = 0,
B_TRUE = 1
} boolean_t;
typedef unsigned char uchar_t;
typedef unsigned short ushort_t;
typedef unsigned int uint_t;
typedef unsigned long ulong_t;
typedef unsigned long long u_longlong_t;
typedef long long longlong_t;
typedef long intptr_t;
typedef unsigned long long rlim64_t;
typedef struct task_struct kthread_t;
typedef struct task_struct proc_t;
typedef int id_t;
typedef short pri_t;
typedef short index_t;
typedef longlong_t offset_t;
typedef u_longlong_t u_offset_t;
typedef ulong_t pgcnt_t;
typedef int major_t;
typedef int minor_t;
struct user_namespace;
#ifdef HAVE_IOPS_CREATE_IDMAP
#include <linux/refcount.h>
+#ifdef HAVE_IDMAP_NO_USERNS
+#include <linux/user_namespace.h>
+struct mnt_idmap {
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
+ refcount_t count;
+};
+typedef struct mnt_idmap zidmap_t;
+#define idmap_owner(p) (NULL)
+#else
struct mnt_idmap {
struct user_namespace *owner;
refcount_t count;
};
typedef struct mnt_idmap zidmap_t;
#define idmap_owner(p) (((struct mnt_idmap *)p)->owner)
+#endif
#else
typedef struct user_namespace zidmap_t;
#define idmap_owner(p) ((struct user_namespace *)p)
#endif
extern zidmap_t *zfs_init_idmap;
#endif /* _SPL_TYPES_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
index a4b600004c9f..5e6ea8d3c221 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
@@ -1,180 +1,180 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_UIO_H
#define _SPL_UIO_H
#include <sys/debug.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/blkdev_compat.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <asm/uaccess.h>
#include <sys/types.h>
#if defined(HAVE_VFS_IOV_ITER) && defined(HAVE_FAULT_IN_IOV_ITER_READABLE)
#define iov_iter_fault_in_readable(a, b) fault_in_iov_iter_readable(a, b)
#endif
typedef struct iovec iovec_t;
typedef enum zfs_uio_rw {
UIO_READ = 0,
UIO_WRITE = 1,
} zfs_uio_rw_t;
typedef enum zfs_uio_seg {
UIO_USERSPACE = 0,
UIO_SYSSPACE = 1,
UIO_BVEC = 2,
#if defined(HAVE_VFS_IOV_ITER)
UIO_ITER = 3,
#endif
} zfs_uio_seg_t;
typedef struct zfs_uio {
union {
const struct iovec *uio_iov;
const struct bio_vec *uio_bvec;
#if defined(HAVE_VFS_IOV_ITER)
struct iov_iter *uio_iter;
#endif
};
int uio_iovcnt;
offset_t uio_loffset;
zfs_uio_seg_t uio_segflg;
boolean_t uio_fault_disable;
uint16_t uio_fmode;
uint16_t uio_extflg;
ssize_t uio_resid;
size_t uio_skip;
struct request *rq;
} zfs_uio_t;
#define zfs_uio_segflg(u) (u)->uio_segflg
#define zfs_uio_offset(u) (u)->uio_loffset
#define zfs_uio_resid(u) (u)->uio_resid
#define zfs_uio_iovcnt(u) (u)->uio_iovcnt
#define zfs_uio_iovlen(u, idx) (u)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(u, idx) (u)->uio_iov[(idx)].iov_base
#define zfs_uio_fault_disable(u, set) (u)->uio_fault_disable = set
#define zfs_uio_rlimit_fsize(z, u) (0)
#define zfs_uio_fault_move(p, n, rw, u) zfs_uiomove((p), (n), (rw), (u))
extern int zfs_uio_prefaultpages(ssize_t, zfs_uio_t *);
static inline void
zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{
uio->uio_loffset = off;
}
static inline void
-zfs_uio_advance(zfs_uio_t *uio, size_t size)
+zfs_uio_advance(zfs_uio_t *uio, ssize_t size)
{
uio->uio_resid -= size;
uio->uio_loffset += size;
}
static inline void
zfs_uio_iovec_init(zfs_uio_t *uio, const struct iovec *iov,
unsigned long nr_segs, offset_t offset, zfs_uio_seg_t seg, ssize_t resid,
size_t skip)
{
ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE);
uio->uio_iov = iov;
uio->uio_iovcnt = nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = seg;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
}
static inline void
zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio, struct request *rq)
{
/* Either bio or rq will be set, but not both */
ASSERT3P(uio, !=, bio);
if (bio) {
uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
} else {
uio->uio_bvec = NULL;
uio->uio_iovcnt = 0;
}
uio->uio_loffset = io_offset(bio, rq);
uio->uio_segflg = UIO_BVEC;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = io_size(bio, rq);
if (bio) {
uio->uio_skip = BIO_BI_SKIP(bio);
} else {
uio->uio_skip = 0;
}
uio->rq = rq;
}
#if defined(HAVE_VFS_IOV_ITER)
static inline void
zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset,
ssize_t resid, size_t skip)
{
uio->uio_iter = iter;
uio->uio_iovcnt = iter->nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = UIO_ITER;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
}
#endif
#if defined(HAVE_ITER_IOV)
#define zfs_uio_iter_iov(iter) iter_iov((iter))
#else
#define zfs_uio_iter_iov(iter) (iter)->iov
#endif
#if defined(HAVE_IOV_ITER_TYPE)
#define zfs_uio_iov_iter_type(iter) iov_iter_type((iter))
#else
#define zfs_uio_iov_iter_type(iter) (iter)->type
#endif
#endif /* SPL_UIO_H */
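A hedged sketch of building a zfs_uio_t over a single kernel buffer with zfs_uio_iovec_init() as declared above; the wrapper is illustrative, and the iovec must outlive the uio that references it.

static void
my_uio_from_buf(zfs_uio_t *uio, struct iovec *iov, void *buf, size_t len,
    offset_t off)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* one segment, kernel address space, resid == len, no skip */
	zfs_uio_iovec_init(uio, iov, 1, off, UIO_SYSSPACE, len, 0);
}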
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
index 220466550258..b4d5db21f5e5 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vfsops_os.h
@@ -1,257 +1,255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VFSOPS_H
#define _SYS_FS_ZFS_VFSOPS_H
#include <sys/dataset_kstats.h>
#include <sys/isa_defs.h>
#include <sys/types32.h>
#include <sys/list.h>
#include <sys/vfs.h>
#include <sys/zil.h>
#include <sys/sa.h>
#include <sys/rrwlock.h>
#include <sys/dsl_dataset.h>
#include <sys/zfs_ioctl.h>
#include <sys/objlist.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct zfsvfs zfsvfs_t;
struct znode;
-extern int zfs_bclone_enabled;
-
/*
 * This structure emulates the vfs_t from other platforms. Its purpose
* is to facilitate the handling of mount options and minimize structural
* differences between the platforms.
*/
typedef struct vfs {
struct zfsvfs *vfs_data;
char *vfs_mntpoint; /* Primary mount point */
uint64_t vfs_xattr;
boolean_t vfs_readonly;
boolean_t vfs_do_readonly;
boolean_t vfs_setuid;
boolean_t vfs_do_setuid;
boolean_t vfs_exec;
boolean_t vfs_do_exec;
boolean_t vfs_devices;
boolean_t vfs_do_devices;
boolean_t vfs_do_xattr;
boolean_t vfs_atime;
boolean_t vfs_do_atime;
boolean_t vfs_relatime;
boolean_t vfs_do_relatime;
boolean_t vfs_nbmand;
boolean_t vfs_do_nbmand;
} vfs_t;
typedef struct zfs_mnt {
const char *mnt_osname; /* Objset name */
char *mnt_data; /* Raw mount options */
} zfs_mnt_t;
struct zfsvfs {
vfs_t *z_vfs; /* generic fs struct */
struct super_block *z_sb; /* generic super_block */
struct zfsvfs *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
uint64_t z_max_blksz; /* maximum block size for files */
uint64_t z_fuid_obj; /* fuid table object number */
uint64_t z_fuid_size; /* fuid table size */
avl_tree_t z_fuid_idx; /* fuid tree keyed by index */
avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */
krwlock_t z_fuid_lock; /* fuid lock */
boolean_t z_fuid_loaded; /* fuid tables are loaded */
boolean_t z_fuid_dirty; /* need to sync fuid table ? */
struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */
zilog_t *z_log; /* intent log pointer */
uint_t z_acl_mode; /* acl chmod/mode behavior */
uint_t z_acl_inherit; /* acl inheritance behavior */
uint_t z_acl_type; /* type of ACL usable on this FS */
zfs_case_t z_case; /* case-sense */
boolean_t z_utf8; /* utf8-only */
int z_norm; /* normalization flags */
boolean_t z_relatime; /* enable relatime mount option */
boolean_t z_unmounted; /* unmounted */
rrmlock_t z_teardown_lock;
krwlock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all znodes in the fs */
unsigned long z_rollback_time; /* last online rollback time */
unsigned long z_snap_defer_time; /* last snapshot unmount deferral */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
arc_prune_t *z_arc_prune; /* called by ARC to prune caches */
struct inode *z_ctldir; /* .zfs directory inode */
boolean_t z_show_ctldir; /* expose .zfs in the root dir */
boolean_t z_issnap; /* true if this is a snapshot */
boolean_t z_use_fuids; /* version allows fuids */
boolean_t z_replay; /* set during ZIL replay */
boolean_t z_use_sa; /* version allow system attributes */
boolean_t z_xattr_sa; /* allow xattrs to be stores as SA */
boolean_t z_draining; /* is true when drain is active */
boolean_t z_drain_cancel; /* signal the unlinked drain to stop */
uint64_t z_version; /* ZPL version */
uint64_t z_shares_dir; /* hidden shares dir */
dataset_kstats_t z_kstat; /* fs kstats */
kmutex_t z_lock;
uint64_t z_userquota_obj;
uint64_t z_groupquota_obj;
uint64_t z_userobjquota_obj;
uint64_t z_groupobjquota_obj;
uint64_t z_projectquota_obj;
uint64_t z_projectobjquota_obj;
uint64_t z_replay_eof; /* New end of file - replay only */
sa_attr_type_t *z_attr_table; /* SA attr mapping->id */
uint64_t z_hold_size; /* znode hold array size */
avl_tree_t *z_hold_trees; /* znode hold trees */
kmutex_t *z_hold_locks; /* znode hold locks */
taskqid_t z_drain_task; /* task id for the unlink drain task */
};
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*
* Allow a maximum number of links. While ZFS does not internally limit
 * this, the inode->i_nlink member is defined as an unsigned int. To be
* safe we use 2^31-1 as the limit.
*/
#define ZFS_LINK_MAX ((1U << 31) - 1U)
/*
* Normal filesystems (those not under .zfs/snapshot) have a total
* file ID size limited to 12 bytes (including the length field) due to
* NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical
* reasons, this same limit is being imposed by the Solaris NFSv3 implementation
* (although the NFSv3 protocol actually permits a maximum of 64 bytes). It
* is not possible to expand beyond 12 bytes without abandoning support
* of NFSv2.
*
* For normal filesystems, we partition up the available space as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
*
* We reserve only 48 bits for the object number, as this is the limit
* currently defined and imposed by the DMU.
*/
typedef struct zfid_short {
uint16_t zf_len;
uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_short_t;
/*
* Filesystems under .zfs/snapshot have a total file ID size of 22 bytes
* (including the length field). This makes files under .zfs/snapshot
* accessible by NFSv3 and NFSv4, but not NFSv2.
*
* For files under .zfs/snapshot, we partition up the available space
* as follows:
* 2 bytes fid length (required)
* 6 bytes object number (48 bits)
* 4 bytes generation number (32 bits)
* 6 bytes objset id (48 bits)
* 4 bytes currently just zero (32 bits)
*
* We reserve only 48 bits for the object number and objset id, as these are
* the limits currently defined and imposed by the DMU.
*/
typedef struct zfid_long {
zfid_short_t z_fid;
uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */
uint8_t zf_setgen[4]; /* gen[i] = gen >> (8 * i) */
} zfid_long_t;
#define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t))
#define LONG_FID_LEN (sizeof (zfid_long_t) - sizeof (uint16_t))
extern void zfs_init(void);
extern void zfs_fini(void);
extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_end_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern void zfs_exit_fs(zfsvfs_t *zfsvfs);
extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
extern int zfsvfs_create(const char *name, boolean_t readonly, zfsvfs_t **zfvp);
extern int zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os);
extern void zfsvfs_free(zfsvfs_t *zfsvfs);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs);
extern int zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent);
extern void zfs_preumount(struct super_block *sb);
extern int zfs_umount(struct super_block *sb);
extern int zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm);
extern int zfs_statvfs(struct inode *ip, struct kstatfs *statp);
extern int zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp);
extern int zfs_prune(struct super_block *sb, unsigned long nr_to_scan,
int *objects);
extern int zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop,
uint64_t *val, char *setpoint);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_VFSOPS_H */
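To make the fid layout documented above concrete, here is a hedged sketch that packs a 48-bit object number and a 32-bit generation into a zfid_short_t using the obj[i] = obj >> (8 * i) byte order from the comment; the function is illustrative.

static void
my_fill_short_fid(zfid_short_t *zf, uint64_t object, uint32_t gen)
{
	int i;

	zf->zf_len = SHORT_FID_LEN;
	for (i = 0; i < 6; i++)
		zf->zf_object[i] = (uint8_t)(object >> (8 * i));
	for (i = 0; i < 4; i++)
		zf->zf_gen[i] = (uint8_t)(gen >> (8 * i));
}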
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
index 9b729be6d74d..91a4751fffb0 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
@@ -1,277 +1,297 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*/
#ifndef _SYS_ZPL_H
#define _SYS_ZPL_H
#include <sys/mntent.h>
#include <sys/vfs.h>
#include <linux/aio.h>
#include <linux/dcache_compat.h>
#include <linux/exportfs.h>
#include <linux/falloc.h>
#include <linux/parser.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/vfs_compat.h>
#include <linux/writeback.h>
#include <linux/xattr_compat.h>
/* zpl_inode.c */
extern void zpl_vap_init(vattr_t *vap, struct inode *dir,
umode_t mode, cred_t *cr, zidmap_t *mnt_ns);
extern const struct inode_operations zpl_inode_operations;
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
extern const struct inode_operations_wrapper zpl_dir_inode_operations;
#else
extern const struct inode_operations zpl_dir_inode_operations;
#endif
extern const struct inode_operations zpl_symlink_inode_operations;
extern const struct inode_operations zpl_special_inode_operations;
/* zpl_file.c */
extern const struct address_space_operations zpl_address_space_operations;
#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
extern const struct file_operations_extend zpl_file_operations;
#else
extern const struct file_operations zpl_file_operations;
#endif
extern const struct file_operations zpl_dir_file_operations;
/* zpl_super.c */
extern void zpl_prune_sb(uint64_t nr_to_scan, void *arg);
extern const struct super_operations zpl_super_operations;
extern const struct export_operations zpl_export_operations;
extern struct file_system_type zpl_fs_type;
/* zpl_xattr.c */
extern ssize_t zpl_xattr_list(struct dentry *dentry, char *buf, size_t size);
extern int zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr);
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
#if defined(HAVE_SET_ACL_IDMAP_DENTRY)
extern int zpl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS)
extern int zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2)
extern int zpl_set_acl(struct user_namespace *userns, struct dentry *dentry,
struct posix_acl *acl, int type);
#else
extern int zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type);
#endif /* HAVE_SET_ACL_USERNS */
#endif /* HAVE_SET_ACL */
#if defined(HAVE_GET_ACL_RCU) || defined(HAVE_GET_INODE_ACL)
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type, bool rcu);
#elif defined(HAVE_GET_ACL)
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type);
#endif
extern int zpl_init_acl(struct inode *ip, struct inode *dir);
extern int zpl_chmod_acl(struct inode *ip);
#else
static inline int
zpl_init_acl(struct inode *ip, struct inode *dir)
{
return (0);
}
static inline int
zpl_chmod_acl(struct inode *ip)
{
return (0);
}
#endif /* CONFIG_FS_POSIX_ACL */
extern xattr_handler_t *zpl_xattr_handlers[];
/* zpl_ctldir.c */
extern const struct file_operations zpl_fops_root;
extern const struct inode_operations zpl_ops_root;
extern const struct file_operations zpl_fops_snapdir;
extern const struct inode_operations zpl_ops_snapdir;
extern const struct file_operations zpl_fops_shares;
extern const struct inode_operations zpl_ops_shares;
#if defined(HAVE_VFS_ITERATE) || defined(HAVE_VFS_ITERATE_SHARED)
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.actor = _actor, \
.pos = _pos, \
}
typedef struct dir_context zpl_dir_context_t;
#define zpl_dir_emit dir_emit
#define zpl_dir_emit_dot dir_emit_dot
#define zpl_dir_emit_dotdot dir_emit_dotdot
#define zpl_dir_emit_dots dir_emit_dots
#else
typedef struct zpl_dir_context {
void *dirent;
const filldir_t actor;
loff_t pos;
} zpl_dir_context_t;
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.dirent = _dirent, \
.actor = _actor, \
.pos = _pos, \
}
static inline bool
zpl_dir_emit(zpl_dir_context_t *ctx, const char *name, int namelen,
uint64_t ino, unsigned type)
{
return (!ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type));
}
static inline bool
zpl_dir_emit_dot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, ".", 1, ctx->pos,
file_inode(file)->i_ino, DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dotdot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, "..", 2, ctx->pos,
parent_ino(file_dentry(file)), DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dots(struct file *file, zpl_dir_context_t *ctx)
{
if (ctx->pos == 0) {
if (!zpl_dir_emit_dot(file, ctx))
return (false);
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!zpl_dir_emit_dotdot(file, ctx))
return (false);
ctx->pos = 2;
}
return (true);
}
#endif /* HAVE_VFS_ITERATE */
/* zpl_file_range.c */
/* handlers for file_operations of the same name */
extern ssize_t zpl_copy_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, size_t len, unsigned int flags);
extern loff_t zpl_remap_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, loff_t len, unsigned int flags);
extern int zpl_clone_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, uint64_t len);
extern int zpl_dedupe_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, uint64_t len);
/* compat for FICLONE/FICLONERANGE/FIDEDUPERANGE ioctls */
typedef struct {
int64_t fcr_src_fd;
uint64_t fcr_src_offset;
uint64_t fcr_src_length;
uint64_t fcr_dest_offset;
} zfs_ioc_compat_file_clone_range_t;
typedef struct {
int64_t fdri_dest_fd;
uint64_t fdri_dest_offset;
uint64_t fdri_bytes_deduped;
int32_t fdri_status;
uint32_t fdri_reserved;
} zfs_ioc_compat_dedupe_range_info_t;
typedef struct {
uint64_t fdr_src_offset;
uint64_t fdr_src_length;
uint16_t fdr_dest_count;
uint16_t fdr_reserved1;
uint32_t fdr_reserved2;
zfs_ioc_compat_dedupe_range_info_t fdr_info[];
} zfs_ioc_compat_dedupe_range_t;
#define ZFS_IOC_COMPAT_FICLONE _IOW(0x94, 9, int)
#define ZFS_IOC_COMPAT_FICLONERANGE \
_IOW(0x94, 13, zfs_ioc_compat_file_clone_range_t)
#define ZFS_IOC_COMPAT_FIDEDUPERANGE \
_IOWR(0x94, 54, zfs_ioc_compat_dedupe_range_t)
extern long zpl_ioctl_ficlone(struct file *filp, void *arg);
extern long zpl_ioctl_ficlonerange(struct file *filp, void *arg);
extern long zpl_ioctl_fideduperange(struct file *filp, void *arg);
#if defined(HAVE_INODE_TIMESTAMP_TRUNCATE)
#define zpl_inode_timestamp_truncate(ts, ip) timestamp_truncate(ts, ip)
#elif defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec64_trunc(ts, (ip)->i_sb->s_time_gran)
#else
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec_trunc(ts, (ip)->i_sb->s_time_gran)
#endif
#if defined(HAVE_INODE_OWNER_OR_CAPABLE)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_USERNS)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ns, ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_IDMAP)
#define zpl_inode_owner_or_capable(idmap, ip) inode_owner_or_capable(idmap, ip)
#else
#error "Unsupported kernel"
#endif
#if defined(HAVE_SETATTR_PREPARE_USERNS) || defined(HAVE_SETATTR_PREPARE_IDMAP)
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(ns, dentry, ia)
#else
/*
* Use kernel-provided version, or our own from
* linux/vfs_compat.h
*/
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(dentry, ia)
#endif
#ifdef HAVE_INODE_GET_CTIME
#define zpl_inode_get_ctime(ip) inode_get_ctime(ip)
#else
#define zpl_inode_get_ctime(ip) (ip->i_ctime)
#endif
#ifdef HAVE_INODE_SET_CTIME_TO_TS
#define zpl_inode_set_ctime_to_ts(ip, ts) inode_set_ctime_to_ts(ip, ts)
#else
#define zpl_inode_set_ctime_to_ts(ip, ts) (ip->i_ctime = ts)
#endif
+#ifdef HAVE_INODE_GET_ATIME
+#define zpl_inode_get_atime(ip) inode_get_atime(ip)
+#else
+#define zpl_inode_get_atime(ip) (ip->i_atime)
+#endif
+#ifdef HAVE_INODE_SET_ATIME_TO_TS
+#define zpl_inode_set_atime_to_ts(ip, ts) inode_set_atime_to_ts(ip, ts)
+#else
+#define zpl_inode_set_atime_to_ts(ip, ts) (ip->i_atime = ts)
+#endif
+#ifdef HAVE_INODE_GET_MTIME
+#define zpl_inode_get_mtime(ip) inode_get_mtime(ip)
+#else
+#define zpl_inode_get_mtime(ip) (ip->i_mtime)
+#endif
+#ifdef HAVE_INODE_SET_MTIME_TO_TS
+#define zpl_inode_set_mtime_to_ts(ip, ts) inode_set_mtime_to_ts(ip, ts)
+#else
+#define zpl_inode_set_mtime_to_ts(ip, ts) (ip->i_mtime = ts)
+#endif
#endif /* _SYS_ZPL_H */
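A small hedged sketch of the timestamp compat macros added above: copying one inode's times to another reads the same whether the kernel exposes accessor functions or raw i_atime/i_mtime/i_ctime fields. The helper is illustrative.

static inline void
my_copy_inode_times(struct inode *dst, struct inode *src)
{
	zpl_inode_set_atime_to_ts(dst, zpl_inode_get_atime(src));
	zpl_inode_set_mtime_to_ts(dst, zpl_inode_get_mtime(src));
	zpl_inode_set_ctime_to_ts(dst, zpl_inode_get_ctime(src));
}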
diff --git a/sys/contrib/openzfs/include/sys/dataset_kstats.h b/sys/contrib/openzfs/include/sys/dataset_kstats.h
index 40cf5258a2e7..c81a07f0c116 100644
--- a/sys/contrib/openzfs/include/sys/dataset_kstats.h
+++ b/sys/contrib/openzfs/include/sys/dataset_kstats.h
@@ -1,81 +1,82 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 by Delphix. All rights reserved.
* Copyright (c) 2018 Datto Inc.
*/
#ifndef _SYS_DATASET_KSTATS_H
#define _SYS_DATASET_KSTATS_H
#include <sys/wmsum.h>
#include <sys/dmu.h>
#include <sys/kstat.h>
#include <sys/zil.h>
typedef struct dataset_sum_stats_t {
wmsum_t dss_writes;
wmsum_t dss_nwritten;
wmsum_t dss_reads;
wmsum_t dss_nread;
wmsum_t dss_nunlinks;
wmsum_t dss_nunlinked;
} dataset_sum_stats_t;
typedef struct dataset_kstat_values {
kstat_named_t dkv_ds_name;
kstat_named_t dkv_writes;
kstat_named_t dkv_nwritten;
kstat_named_t dkv_reads;
kstat_named_t dkv_nread;
/*
* nunlinks is initialized to the unlinked set size on mount and
* is incremented whenever a new entry is added to the unlinked set
*/
kstat_named_t dkv_nunlinks;
/*
* nunlinked is initialized to zero on mount and is incremented when an
* entry is removed from the unlinked set
*/
kstat_named_t dkv_nunlinked;
/*
* Per dataset zil kstats
*/
zil_kstat_values_t dkv_zil_stats;
} dataset_kstat_values_t;
typedef struct dataset_kstats {
dataset_sum_stats_t dk_sums;
zil_sums_t dk_zil_sums;
kstat_t *dk_kstats;
} dataset_kstats_t;
int dataset_kstats_create(dataset_kstats_t *, objset_t *);
void dataset_kstats_destroy(dataset_kstats_t *);
+void dataset_kstats_rename(dataset_kstats_t *dk, const char *);
void dataset_kstats_update_write_kstats(dataset_kstats_t *, int64_t);
void dataset_kstats_update_read_kstats(dataset_kstats_t *, int64_t);
void dataset_kstats_update_nunlinks_kstat(dataset_kstats_t *, int64_t);
void dataset_kstats_update_nunlinked_kstat(dataset_kstats_t *, int64_t);
#endif /* _SYS_DATASET_KSTATS_H */
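A hedged lifecycle sketch for the helpers above: create the per-dataset kstats when an objset comes online, feed the counters from the I/O path, and destroy them on teardown. The call site and the 4096-byte write are illustrative.

static int
my_dataset_kstats_lifecycle(dataset_kstats_t *dk, objset_t *os)
{
	int error = dataset_kstats_create(dk, os);

	if (error != 0)
		return (error);

	/* per completed write: account the bytes written */
	dataset_kstats_update_write_kstats(dk, 4096);

	/* on teardown */
	dataset_kstats_destroy(dk);
	return (0);
}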
diff --git a/sys/contrib/openzfs/include/sys/dbuf.h b/sys/contrib/openzfs/include/sys/dbuf.h
index 1800a7e31da0..f2a1535c9167 100644
--- a/sys/contrib/openzfs/include/sys/dbuf.h
+++ b/sys/contrib/openzfs/include/sys/dbuf.h
@@ -1,510 +1,510 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DBUF_H
#define _SYS_DBUF_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
#define IN_DMU_SYNC 2
/*
* define flags for dbuf_read
*/
#define DB_RF_MUST_SUCCEED (1 << 0)
#define DB_RF_CANFAIL (1 << 1)
#define DB_RF_HAVESTRUCT (1 << 2)
#define DB_RF_NOPREFETCH (1 << 3)
#define DB_RF_NEVERWAIT (1 << 4)
#define DB_RF_CACHED (1 << 5)
#define DB_RF_NO_DECRYPT (1 << 6)
#define DB_RF_PARTIAL_FIRST (1 << 7)
#define DB_RF_PARTIAL_MORE (1 << 8)
/*
* The simplified state transition diagram for dbufs looks like:
*
*            +--> READ ------+
*            |               |
*            |               V
* (alloc)-->UNCACHED       CACHED-->EVICTING-->(free)
*            ^    |          ^         ^
*            |    |          |         |
*            |    +--> FILL -+         |
*            |    |                    |
*            |    |                    |
*            |    +------> NOFILL -----+
*            |               |
*            +---------------+
*
* DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
* to find all dbufs in a range of a dnode and must be less than any other
* dbuf_states_t (see comment on dn_dbufs in dnode.h).
*/
typedef enum dbuf_states {
DB_SEARCH = -1,
DB_UNCACHED,
DB_FILL,
DB_NOFILL,
DB_READ,
DB_CACHED,
DB_EVICTING
} dbuf_states_t;
typedef enum dbuf_cached_state {
DB_NO_CACHE = -1,
DB_DBUF_CACHE,
DB_DBUF_METADATA_CACHE,
DB_CACHE_MAX
} dbuf_cached_state_t;
struct dnode;
struct dmu_tx;
/*
* level = 0 means the user data
* level = 1 means the single indirect block
* etc.
*/
struct dmu_buf_impl;
typedef enum override_states {
DR_NOT_OVERRIDDEN,
DR_IN_DMU_SYNC,
DR_OVERRIDDEN
} override_states_t;
typedef enum db_lock_type {
DLT_NONE,
DLT_PARENT,
DLT_OBJSET
} db_lock_type_t;
typedef struct dbuf_dirty_record {
/* link on our parent's dirty list */
list_node_t dr_dirty_node;
/* transaction group this data will sync in */
uint64_t dr_txg;
/* zio of outstanding write IO */
zio_t *dr_zio;
/* pointer back to our dbuf */
struct dmu_buf_impl *dr_dbuf;
/* list link for dbuf dirty records */
list_node_t dr_dbuf_node;
/*
* The dnode we are part of. Note that the dnode can not be moved or
* evicted due to the hold that's added by dnode_setdirty() or
* dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
* userquota_updates_task(). This hold is necessary for
* dirty_lightweight_leaf-type dirty records, which don't have a hold
* on a dbuf.
*/
dnode_t *dr_dnode;
/* pointer to parent dirty record */
struct dbuf_dirty_record *dr_parent;
/* How much space was charged to dsl_pool_dirty_space() for this? */
unsigned int dr_accounted;
/* A copy of the bp that points to us */
blkptr_t dr_bp_copy;
union dirty_types {
struct dirty_indirect {
/* protect access to list */
kmutex_t dr_mtx;
/* Our list of dirty children */
list_t dr_children;
} di;
struct dirty_leaf {
/*
* dr_data is set when we dirty the buffer
* so that we can retain the pointer even if it
* gets COW'd in a subsequent transaction group.
*/
arc_buf_t *dr_data;
blkptr_t dr_overridden_by;
override_states_t dr_override_state;
uint8_t dr_copies;
boolean_t dr_nopwrite;
boolean_t dr_brtwrite;
boolean_t dr_has_raw_params;
/*
* If dr_has_raw_params is set, the following crypt
* params will be set on the BP that's written.
*/
boolean_t dr_byteorder;
uint8_t dr_salt[ZIO_DATA_SALT_LEN];
uint8_t dr_iv[ZIO_DATA_IV_LEN];
uint8_t dr_mac[ZIO_DATA_MAC_LEN];
} dl;
struct dirty_lightweight_leaf {
/*
* This dirty record refers to a leaf (level=0)
* block, whose dbuf has not been instantiated for
* performance reasons.
*/
uint64_t dr_blkid;
abd_t *dr_abd;
zio_prop_t dr_props;
zio_flag_t dr_flags;
} dll;
} dt;
} dbuf_dirty_record_t;
typedef struct dmu_buf_impl {
/*
* The following members are immutable, with the exception of
* db.db_data, which is protected by db_mtx.
*/
/* the publicly visible structure */
dmu_buf_t db;
/* the objset we belong to */
struct objset *db_objset;
/*
* handle to safely access the dnode we belong to (NULL when evicted)
*/
struct dnode_handle *db_dnode_handle;
/*
* our parent buffer; if the dnode points to us directly,
* db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
* only accessed by sync thread ???
* (NULL when evicted)
* May change from NULL to non-NULL under the protection of db_mtx
* (see dbuf_check_blkptr())
*/
struct dmu_buf_impl *db_parent;
/*
* link for hash table of all dmu_buf_impl_t's
*/
struct dmu_buf_impl *db_hash_next;
/*
* Our link on the owner dnode's dn_dbufs list.
* Protected by its dn_dbufs_mtx. Should be on the same cache line
* as db_level and db_blkid for the best avl_add() performance.
*/
avl_node_t db_link;
/* our block number */
uint64_t db_blkid;
/*
* Pointer to the blkptr_t which points to us. May be NULL if we
* don't have one yet. (NULL when evicted)
*/
blkptr_t *db_blkptr;
/*
* Our indirection level. Data buffers have db_level==0.
* Indirect buffers which point to data buffers have
* db_level==1, and so on. Buffers which contain dnodes have
* db_level==0, since the dnodes are stored in a file.
*/
uint8_t db_level;
/*
* Protects db_buf's contents if they contain an indirect block or data
* block of the meta-dnode. We use this lock to protect the structure of
* the block tree. This means that when modifying this dbuf's data, we
* grab its rwlock. When modifying its parent's data (including the
* blkptr to this dbuf), we grab the parent's rwlock. The lock ordering
* for this lock is:
* 1) dn_struct_rwlock
* 2) db_rwlock
* We don't currently grab multiple dbufs' db_rwlocks at once.
*/
krwlock_t db_rwlock;
/* buffer holding our data */
arc_buf_t *db_buf;
/* db_mtx protects the members below */
kmutex_t db_mtx;
/*
* Current state of the buffer
*/
dbuf_states_t db_state;
/*
* Refcount accessed by dmu_buf_{hold,rele}.
* If nonzero, the buffer can't be destroyed.
* Protected by db_mtx.
*/
zfs_refcount_t db_holds;
kcondvar_t db_changed;
dbuf_dirty_record_t *db_data_pending;
/* List of dirty records for the buffer sorted newest to oldest. */
list_t db_dirty_records;
/* Link in dbuf_cache or dbuf_metadata_cache */
multilist_node_t db_cache_link;
/* Tells us which dbuf cache this dbuf is in, if any */
dbuf_cached_state_t db_caching_status;
uint64_t db_hash;
/* Data which is unique to data (leaf) blocks: */
/* User callback information. */
dmu_buf_user_t *db_user;
/*
* Evict user data as soon as the dirty and reference
* counts are equal.
*/
uint8_t db_user_immediate_evict;
/*
* This block was freed while a read or write was
* active.
*/
uint8_t db_freed_in_flight;
/*
* dnode_evict_dbufs() or dnode_evict_bonus() tried to
* evict this dbuf, but couldn't due to outstanding
* references. Evict once the refcount drops to 0.
*/
uint8_t db_pending_evict;
uint8_t db_dirtycnt;
/* The buffer was partially read. More reads may follow. */
uint8_t db_partial_read;
} dmu_buf_impl_t;
#define DBUF_HASH_MUTEX(h, idx) \
(&(h)->hash_mutexes[(idx) & ((h)->hash_mutex_mask)])
typedef struct dbuf_hash_table {
uint64_t hash_table_mask;
uint64_t hash_mutex_mask;
dmu_buf_impl_t **hash_table;
kmutex_t *hash_mutexes;
} dbuf_hash_table_t;
typedef void (*dbuf_prefetch_fn)(void *, uint64_t, uint64_t, boolean_t);
uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
const uint64_t offset);
void dbuf_create_bonus(struct dnode *dn);
int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);
void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);
dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, const void *tag);
dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
const void *tag);
int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
const void *tag, dmu_buf_impl_t **dbp);
int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg);
int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags);
void dbuf_add_ref(dmu_buf_impl_t *db, const void *tag);
boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
uint64_t blkid, const void *tag);
uint64_t dbuf_refcount(dmu_buf_impl_t *db);
void dbuf_rele(dmu_buf_impl_t *db, const void *tag);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag,
boolean_t evicting);
dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
uint64_t blkid, uint64_t *hash_out);
int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
void dmu_buf_will_clone(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
-void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
-void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
+void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx, boolean_t canfail);
+boolean_t dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx, boolean_t failed);
void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
dmu_tx_t *tx);
boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
const struct zio_prop *zp, zio_flag_t flags, dmu_tx_t *tx);
void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
void dbuf_destroy(dmu_buf_impl_t *db);
void dbuf_unoverride(dbuf_dirty_record_t *dr);
void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
void dbuf_release_bp(dmu_buf_impl_t *db);
db_lock_type_t dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw,
const void *tag);
void dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type,
const void *tag);
void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
struct dmu_tx *);
void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
void dbuf_stats_init(dbuf_hash_table_t *hash);
void dbuf_stats_destroy(void);
int dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift);
#define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
#define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
#define DB_DNODE_ENTER(_db) (zrl_add(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_EXIT(_db) (zrl_remove(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_HELD(_db) (!zrl_is_zero(&DB_DNODE_LOCK(_db)))
void dbuf_init(void);
void dbuf_fini(void);
boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
static inline dbuf_dirty_record_t *
dbuf_find_dirty_lte(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr;
for (dr = list_head(&db->db_dirty_records);
dr != NULL && dr->dr_txg > txg;
dr = list_next(&db->db_dirty_records, dr))
continue;
return (dr);
}
static inline dbuf_dirty_record_t *
dbuf_find_dirty_eq(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr;
dr = dbuf_find_dirty_lte(db, txg);
if (dr && dr->dr_txg == txg)
return (dr);
return (NULL);
}
#define DBUF_GET_BUFC_TYPE(_db) \
(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
#define DBUF_IS_CACHEABLE(_db) \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL || \
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA)))
boolean_t dbuf_is_l2cacheable(dmu_buf_impl_t *db);
#ifdef ZFS_DEBUG
/*
* There should be a ## between the string literal and fmt, to make it
* clear that we're joining two strings together, but gcc does not
* support that preprocessor token.
*/
#define dprintf_dbuf(dbuf, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dbuf)->db.db_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strlcpy(__db_buf, "mdn", sizeof (__db_buf)); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj); \
dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
"obj=%s lvl=%u blkid=%lld " fmt, \
__db_buf, (dbuf)->db_level, \
(u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
} \
} while (0)
#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp); \
dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
} while (0)
#define DBUF_VERIFY(db) dbuf_verify(db)
#else
#define dprintf_dbuf(db, fmt, ...)
#define dprintf_dbuf_bp(db, bp, fmt, ...)
#define DBUF_VERIFY(db)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DBUF_H */
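The functional change in this hunk is the fill protocol: dmu_buf_will_fill() now takes a canfail flag and dmu_buf_fill_done() takes (and reports) failure, so a fill that faults partway can be rolled back rather than leaving a half-written buffer. A sketch of the revised calling convention; copy_from_user_somehow() is a hypothetical stand-in for whatever copy step might fail:

extern int copy_from_user_somehow(void *, const void *, size_t); /* hypothetical */

static int
example_fill(dmu_buf_t *db, dmu_tx_t *tx, const void *src, size_t len)
{
        int err;

        dmu_buf_will_fill(db, tx, B_TRUE);      /* B_TRUE: fill may fail */
        err = copy_from_user_somehow(db->db_data, src, len);
        /* passing failed=B_TRUE lets the dbuf layer undo the fill */
        (void) dmu_buf_fill_done(db, tx, err != 0);
        return (err);
}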
diff --git a/sys/contrib/openzfs/include/sys/dsl_crypt.h b/sys/contrib/openzfs/include/sys/dsl_crypt.h
index 72716e296c9e..fbcae3715355 100644
--- a/sys/contrib/openzfs/include/sys/dsl_crypt.h
+++ b/sys/contrib/openzfs/include/sys/dsl_crypt.h
@@ -1,227 +1,228 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#ifndef _SYS_DSL_CRYPT_H
#define _SYS_DSL_CRYPT_H
#include <sys/dmu_tx.h>
#include <sys/dmu.h>
#include <sys/zio_crypt.h>
#include <sys/spa.h>
#include <sys/dsl_dataset.h>
/*
* ZAP entry keys for DSL Crypto Keys stored on disk. In addition,
* ZFS_PROP_KEYFORMAT, ZFS_PROP_PBKDF2_SALT, and ZFS_PROP_PBKDF2_ITERS are
* also maintained here using their respective property names.
*/
#define DSL_CRYPTO_KEY_CRYPTO_SUITE "DSL_CRYPTO_SUITE"
#define DSL_CRYPTO_KEY_GUID "DSL_CRYPTO_GUID"
#define DSL_CRYPTO_KEY_IV "DSL_CRYPTO_IV"
#define DSL_CRYPTO_KEY_MAC "DSL_CRYPTO_MAC"
#define DSL_CRYPTO_KEY_MASTER_KEY "DSL_CRYPTO_MASTER_KEY_1"
#define DSL_CRYPTO_KEY_HMAC_KEY "DSL_CRYPTO_HMAC_KEY_1"
#define DSL_CRYPTO_KEY_ROOT_DDOBJ "DSL_CRYPTO_ROOT_DDOBJ"
#define DSL_CRYPTO_KEY_REFCOUNT "DSL_CRYPTO_REFCOUNT"
#define DSL_CRYPTO_KEY_VERSION "DSL_CRYPTO_VERSION"
/*
* In-memory representation of a wrapping key. One of these structs will exist
* for each encryption root with its key loaded.
*/
typedef struct dsl_wrapping_key {
/* link on spa_keystore_t:sk_wkeys */
avl_node_t wk_avl_link;
/* keyformat property enum */
zfs_keyformat_t wk_keyformat;
/* the pbkdf2 salt, if the keyformat is of type passphrase */
uint64_t wk_salt;
/* the pbkdf2 iterations, if the keyformat is of type passphrase */
uint64_t wk_iters;
/* actual wrapping key */
crypto_key_t wk_key;
/* refcount of number of dsl_crypto_key_t's holding this struct */
zfs_refcount_t wk_refcnt;
/* dsl directory object that owns this wrapping key */
uint64_t wk_ddobj;
} dsl_wrapping_key_t;
/* enum of commands indicating special actions that should be run */
typedef enum dcp_cmd {
/* key creation commands */
DCP_CMD_NONE = 0, /* no specific command */
DCP_CMD_RAW_RECV, /* raw receive */
/* key changing commands */
DCP_CMD_NEW_KEY, /* rewrap key as an encryption root */
DCP_CMD_INHERIT, /* rewrap key with parent's wrapping key */
DCP_CMD_FORCE_NEW_KEY, /* change to encryption root without rewrap */
DCP_CMD_FORCE_INHERIT, /* inherit parent's key without rewrap */
DCP_CMD_MAX
} dcp_cmd_t;
/*
* This struct is a simple wrapper around all the parameters that are usually
* required to set up encryption. It exists so that all of the params can be
* passed around the kernel together for convenience.
*/
typedef struct dsl_crypto_params {
/* command indicating intended action */
dcp_cmd_t cp_cmd;
/* the encryption algorithm */
enum zio_encrypt cp_crypt;
/* keylocation property string */
char *cp_keylocation;
/* the wrapping key */
dsl_wrapping_key_t *cp_wkey;
} dsl_crypto_params_t;
/*
* In-memory representation of a DSL Crypto Key object. One of these structs
* (and corresponding on-disk ZAP object) will exist for each encrypted
* clone family that is mounted or otherwise reading protected data.
*/
typedef struct dsl_crypto_key {
/* link on spa_keystore_t:sk_dsl_keys */
avl_node_t dck_avl_link;
/* refcount of holders of this key */
zfs_refcount_t dck_holds;
/* master key used to derive encryption keys */
zio_crypt_key_t dck_key;
/* wrapping key for syncing this structure to disk */
dsl_wrapping_key_t *dck_wkey;
/* on-disk object id */
uint64_t dck_obj;
} dsl_crypto_key_t;
/*
* In-memory mapping of a dataset object id to a DSL Crypto Key. This is used
* to look up the corresponding dsl_crypto_key_t from the zio layer for
* performing data encryption and decryption.
*/
typedef struct dsl_key_mapping {
/* link on spa_keystore_t:sk_key_mappings */
avl_node_t km_avl_link;
/* refcount of how many users are depending on this mapping */
zfs_refcount_t km_refcnt;
/* dataset this crypto key belongs to (index) */
uint64_t km_dsobj;
/* crypto key (value) of this record */
dsl_crypto_key_t *km_key;
} dsl_key_mapping_t;
/* in memory structure for holding all wrapping and dsl keys */
typedef struct spa_keystore {
/* lock for protecting sk_dsl_keys */
krwlock_t sk_dk_lock;
/* tree of all dsl_crypto_key_t's */
avl_tree_t sk_dsl_keys;
/* lock for protecting sk_key_mappings */
krwlock_t sk_km_lock;
/* tree of all dsl_key_mapping_t's, indexed by dsobj */
avl_tree_t sk_key_mappings;
/* lock for protecting the wrapping keys tree */
krwlock_t sk_wkeys_lock;
/* tree of all dsl_wrapping_key_t's, indexed by ddobj */
avl_tree_t sk_wkeys;
} spa_keystore_t;
int dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props,
nvlist_t *crypto_args, dsl_crypto_params_t **dcp_out);
void dsl_crypto_params_free(dsl_crypto_params_t *dcp, boolean_t unload);
void dsl_dataset_crypt_stats(struct dsl_dataset *ds, nvlist_t *nv);
int dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation);
boolean_t dsl_dir_incompatible_encryption_version(dsl_dir_t *dd);
void spa_keystore_init(spa_keystore_t *sk);
void spa_keystore_fini(spa_keystore_t *sk);
void spa_keystore_dsl_key_rele(spa_t *spa, dsl_crypto_key_t *dck,
const void *tag);
int spa_keystore_load_wkey_impl(spa_t *spa, dsl_wrapping_key_t *wkey);
int spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp,
boolean_t noop);
int spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj);
int spa_keystore_unload_wkey(const char *dsname);
int spa_keystore_create_mapping(spa_t *spa, struct dsl_dataset *ds,
const void *tag, dsl_key_mapping_t **km_out);
int spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, const void *tag);
void key_mapping_add_ref(dsl_key_mapping_t *km, const void *tag);
void key_mapping_rele(spa_t *spa, dsl_key_mapping_t *km, const void *tag);
int spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, const void *tag,
dsl_crypto_key_t **dck_out);
int dsl_crypto_populate_key_nvlist(struct objset *os,
uint64_t from_ivset_guid, nvlist_t **nvl_out);
int dsl_crypto_recv_raw_key_check(struct dsl_dataset *ds,
nvlist_t *nvl, dmu_tx_t *tx);
void dsl_crypto_recv_raw_key_sync(struct dsl_dataset *ds,
nvlist_t *nvl, dmu_tx_t *tx);
int dsl_crypto_recv_raw(const char *poolname, uint64_t dsobj, uint64_t fromobj,
dmu_objset_type_t ostype, nvlist_t *nvl, boolean_t do_key);
int spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp);
int dsl_dir_rename_crypt_check(dsl_dir_t *dd, dsl_dir_t *newparent);
int dsl_dataset_promote_crypt_check(dsl_dir_t *target, dsl_dir_t *origin);
void dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin,
dmu_tx_t *tx);
int dmu_objset_create_crypt_check(dsl_dir_t *parentdd,
dsl_crypto_params_t *dcp, boolean_t *will_encrypt);
+boolean_t dmu_objset_crypto_key_equal(objset_t *osa, objset_t *osb);
void dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
struct dsl_dataset *origin, dsl_crypto_params_t *dcp, dmu_tx_t *tx);
uint64_t dsl_crypto_key_create_sync(uint64_t crypt, dsl_wrapping_key_t *wkey,
dmu_tx_t *tx);
uint64_t dsl_crypto_key_clone_sync(dsl_dir_t *origindd, dmu_tx_t *tx);
void dsl_crypto_key_destroy_sync(uint64_t dckobj, dmu_tx_t *tx);
int spa_crypt_get_salt(spa_t *spa, uint64_t dsobj, uint8_t *salt);
int spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
abd_t *abd, uint_t datalen, uint8_t *mac);
int spa_do_crypt_objset_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
abd_t *abd, uint_t datalen, boolean_t byteswap);
int spa_do_crypt_abd(boolean_t encrypt, spa_t *spa, const zbookmark_phys_t *zb,
dmu_object_type_t ot, boolean_t dedup, boolean_t bswap, uint8_t *salt,
uint8_t *iv, uint8_t *mac, uint_t datalen, abd_t *pabd, abd_t *cabd,
boolean_t *no_crypt);
zfs_keystatus_t dsl_dataset_get_keystatus(dsl_dir_t *dd);
#endif
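dmu_objset_crypto_key_equal() is new here; it presumably answers whether two objsets decrypt with the same key, which is the question a cross-dataset copy path (e.g. block cloning) has to ask before moving raw blocks. An illustrative guard, with the errno an assumption:

static int
example_crypt_check(objset_t *os_in, objset_t *os_out)
{
        if (os_in != os_out &&
            !dmu_objset_crypto_key_equal(os_in, os_out))
                return (SET_ERROR(EXDEV));      /* errno assumed */
        return (0);
}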
diff --git a/sys/contrib/openzfs/include/sys/spa_impl.h b/sys/contrib/openzfs/include/sys/spa_impl.h
index cdf65c371337..c7ecd3d0ccd3 100644
--- a/sys/contrib/openzfs/include/sys/spa_impl.h
+++ b/sys/contrib/openzfs/include/sys/spa_impl.h
@@ -1,475 +1,476 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019 Datto Inc.
*/
#ifndef _SYS_SPA_IMPL_H
#define _SYS_SPA_IMPL_H
#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/zfs_refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <sys/dsl_deadlist.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct spa_alloc {
kmutex_t spaa_lock;
avl_tree_t spaa_tree;
} ____cacheline_aligned spa_alloc_t;
typedef struct spa_error_entry {
zbookmark_phys_t se_bookmark;
char *se_name;
avl_node_t se_avl;
zbookmark_err_phys_t se_zep; /* not accounted in avl_find */
} spa_error_entry_t;
typedef struct spa_history_phys {
uint64_t sh_pool_create_len; /* ending offset of zpool create */
uint64_t sh_phys_max_off; /* physical EOF */
uint64_t sh_bof; /* logical BOF */
uint64_t sh_eof; /* logical EOF */
uint64_t sh_records_lost; /* num of records overwritten */
} spa_history_phys_t;
/*
* All members must be uint64_t, for byteswap purposes.
*/
typedef struct spa_removing_phys {
uint64_t sr_state; /* dsl_scan_state_t */
/*
* The vdev ID that we most recently attempted to remove,
* or -1 if no removal has been attempted.
*/
uint64_t sr_removing_vdev;
/*
* The vdev ID that we most recently successfully removed,
* or -1 if no devices have been removed.
*/
uint64_t sr_prev_indirect_vdev;
uint64_t sr_start_time;
uint64_t sr_end_time;
/*
* Note that we cannot use the space map's or indirect mapping's
* accounting as a substitute for these values, because we need to
* count frees of not-yet-copied data as though the copy had occurred.
* Otherwise, we could get into a situation where copied > to_copy,
* or we complete before copied == to_copy.
*/
uint64_t sr_to_copy; /* bytes that need to be copied */
uint64_t sr_copied; /* bytes that have been copied or freed */
} spa_removing_phys_t;
/*
* This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
* (with key DMU_POOL_CONDENSING_INDIRECT). It is present if a condense
* of an indirect vdev's mapping object is in progress.
*/
typedef struct spa_condensing_indirect_phys {
/*
* The vdev ID of the indirect vdev whose indirect mapping is
* being condensed.
*/
uint64_t scip_vdev;
/*
* The vdev's old obsolete spacemap. This spacemap's contents are
* being integrated into the new mapping.
*/
uint64_t scip_prev_obsolete_sm_object;
/*
* The new mapping object that is being created.
*/
uint64_t scip_next_mapping_object;
} spa_condensing_indirect_phys_t;
struct spa_aux_vdev {
uint64_t sav_object; /* MOS object for device list */
nvlist_t *sav_config; /* cached device config */
vdev_t **sav_vdevs; /* devices */
int sav_count; /* number of devices */
boolean_t sav_sync; /* sync the device list */
nvlist_t **sav_pending; /* pending device additions */
uint_t sav_npending; /* # pending devices */
};
typedef struct spa_config_lock {
kmutex_t scl_lock;
kthread_t *scl_writer;
int scl_write_wanted;
int scl_count;
kcondvar_t scl_cv;
} ____cacheline_aligned spa_config_lock_t;
typedef struct spa_config_dirent {
list_node_t scd_link;
char *scd_path;
} spa_config_dirent_t;
typedef enum zio_taskq_type {
ZIO_TASKQ_ISSUE = 0,
ZIO_TASKQ_ISSUE_HIGH,
ZIO_TASKQ_INTERRUPT,
ZIO_TASKQ_INTERRUPT_HIGH,
ZIO_TASKQ_TYPES
} zio_taskq_type_t;
/*
* State machine for the zpool-poolname process. The state transitions
* are as follows:
*
* From To Routine
* PROC_NONE -> PROC_CREATED spa_activate()
* PROC_CREATED -> PROC_ACTIVE spa_thread()
* PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate()
* PROC_DEACTIVATE -> PROC_GONE spa_thread()
* PROC_GONE -> PROC_NONE spa_deactivate()
*/
typedef enum spa_proc_state {
SPA_PROC_NONE, /* spa_proc = &p0, no process created */
SPA_PROC_CREATED, /* spa_activate() has proc, is waiting */
SPA_PROC_ACTIVE, /* taskqs created, spa_proc set */
SPA_PROC_DEACTIVATE, /* spa_deactivate() requests process exit */
SPA_PROC_GONE /* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
typedef struct spa_taskqs {
uint_t stqs_count;
taskq_t **stqs_taskq;
} spa_taskqs_t;
typedef enum spa_all_vdev_zap_action {
AVZ_ACTION_NONE = 0,
AVZ_ACTION_DESTROY, /* Destroy all per-vdev ZAPs and the AVZ. */
AVZ_ACTION_REBUILD, /* Populate the new AVZ, see spa_avz_rebuild */
AVZ_ACTION_INITIALIZE
} spa_avz_action_t;
typedef enum spa_config_source {
SPA_CONFIG_SRC_NONE = 0,
SPA_CONFIG_SRC_SCAN, /* scan of path (default: /dev/dsk) */
SPA_CONFIG_SRC_CACHEFILE, /* any cachefile */
SPA_CONFIG_SRC_TRYIMPORT, /* returned from call to tryimport */
SPA_CONFIG_SRC_SPLIT, /* new pool in a pool split */
SPA_CONFIG_SRC_MOS /* MOS, but not always from right txg */
} spa_config_source_t;
struct spa {
/*
* Fields protected by spa_namespace_lock.
*/
char spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */
char *spa_comment; /* comment */
avl_node_t spa_avl; /* node in spa_namespace_avl */
nvlist_t *spa_config; /* last synced config */
nvlist_t *spa_config_syncing; /* currently syncing config */
nvlist_t *spa_config_splitting; /* config for splitting */
nvlist_t *spa_load_info; /* info and errors from load */
uint64_t spa_config_txg; /* txg of last config change */
uint32_t spa_sync_pass; /* iterate-to-convergence */
pool_state_t spa_state; /* pool state */
int spa_inject_ref; /* injection references */
uint8_t spa_sync_on; /* sync threads are running */
spa_load_state_t spa_load_state; /* current load operation */
boolean_t spa_indirect_vdevs_loaded; /* mappings loaded? */
boolean_t spa_trust_config; /* do we trust vdev tree? */
boolean_t spa_is_splitting; /* in the middle of a split? */
spa_config_source_t spa_config_source; /* where config comes from? */
uint64_t spa_import_flags; /* import specific flags */
spa_taskqs_t spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
dsl_pool_t *spa_dsl_pool;
boolean_t spa_is_initializing; /* true while opening pool */
boolean_t spa_is_exporting; /* true while exporting pool */
metaslab_class_t *spa_normal_class; /* normal data class */
metaslab_class_t *spa_log_class; /* intent log data class */
metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */
metaslab_class_t *spa_special_class; /* special allocation class */
metaslab_class_t *spa_dedup_class; /* dedup allocation class */
uint64_t spa_first_txg; /* first txg after spa_open() */
uint64_t spa_final_txg; /* txg of export/destroy */
uint64_t spa_freeze_txg; /* freeze pool at this txg */
uint64_t spa_load_max_txg; /* best initial ub_txg */
uint64_t spa_claim_max_txg; /* highest claimed birth txg */
inode_timespec_t spa_loaded_ts; /* 1st successful open time */
objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */
kmutex_t spa_evicting_os_lock; /* Evicting objset list lock */
list_t spa_evicting_os_list; /* Objsets being evicted. */
kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */
txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
vdev_t *spa_root_vdev; /* top-level vdev container */
uint64_t spa_min_ashift; /* of vdevs in normal class */
uint64_t spa_max_ashift; /* of vdevs in normal class */
uint64_t spa_min_alloc; /* of vdevs in normal class */
uint64_t spa_gcd_alloc; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
uint64_t spa_last_synced_guid; /* last synced guid */
list_t spa_config_dirty_list; /* vdevs with dirty config */
list_t spa_state_dirty_list; /* vdevs with dirty state */
/*
* spa_allocs is an array whose length is stored in spa_alloc_count.
* There is one tree and one lock for each allocator, to help improve
* allocation performance in write-heavy workloads.
*/
spa_alloc_t *spa_allocs;
int spa_alloc_count;
spa_aux_vdev_t spa_spares; /* hot spares */
spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */
+ boolean_t spa_aux_sync_uber; /* need to sync aux uber */
nvlist_t *spa_label_features; /* Features for reading MOS */
uint64_t spa_config_object; /* MOS object for pool config */
uint64_t spa_config_generation; /* config generation number */
uint64_t spa_syncing_txg; /* txg currently syncing */
bpobj_t spa_deferred_bpobj; /* deferred-free bplist */
bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */
/* checksum context templates */
kmutex_t spa_cksum_tmpls_lock;
void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
uberblock_t spa_ubsync; /* last synced uberblock */
uberblock_t spa_uberblock; /* current uberblock */
boolean_t spa_extreme_rewind; /* rewind past deferred frees */
kmutex_t spa_scrub_lock; /* resilver/scrub lock */
uint64_t spa_scrub_inflight; /* in-flight scrub bytes */
/* in-flight verification bytes */
uint64_t spa_load_verify_bytes;
kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */
uint8_t spa_scrub_active; /* active or suspended? */
uint8_t spa_scrub_type; /* type of scrub we're doing */
uint8_t spa_scrub_finished; /* indicator to rotate logs */
uint8_t spa_scrub_started; /* started since last boot */
uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */
uint64_t spa_scan_pass_start; /* start time per pass/reboot */
uint64_t spa_scan_pass_scrub_pause; /* scrub pause time */
uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
uint64_t spa_scan_pass_exam; /* examined bytes per pass */
uint64_t spa_scan_pass_issued; /* issued bytes per pass */
/* error scrub pause time in milliseconds */
uint64_t spa_scan_pass_errorscrub_pause;
/* total error scrub paused time in milliseconds */
uint64_t spa_scan_pass_errorscrub_spent_paused;
/*
* We are in the middle of a resilver, and another resilver
* is needed once this one completes. This is set iff any
* vdev_resilver_deferred is set.
*/
boolean_t spa_resilver_deferred;
kmutex_t spa_async_lock; /* protect async state */
kthread_t *spa_async_thread; /* thread doing async task */
int spa_async_suspended; /* async tasks suspended */
kcondvar_t spa_async_cv; /* wait for thread_exit() */
uint16_t spa_async_tasks; /* async task mask */
uint64_t spa_missing_tvds; /* unopenable tvds on load */
uint64_t spa_missing_tvds_allowed; /* allow loading spa? */
uint64_t spa_nonallocating_dspace;
spa_removing_phys_t spa_removing_phys;
spa_vdev_removal_t *spa_vdev_removal;
spa_condensing_indirect_phys_t spa_condensing_indirect_phys;
spa_condensing_indirect_t *spa_condensing_indirect;
zthr_t *spa_condense_zthr; /* zthr doing condense. */
uint64_t spa_checkpoint_txg; /* the txg of the checkpoint */
spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
zthr_t *spa_checkpoint_discard_zthr;
space_map_t *spa_syncing_log_sm; /* current log space map */
avl_tree_t spa_sm_logs_by_txg;
kmutex_t spa_flushed_ms_lock; /* for metaslabs_by_flushed */
avl_tree_t spa_metaslabs_by_flushed;
spa_unflushed_stats_t spa_unflushed_stats;
list_t spa_log_summary;
uint64_t spa_log_flushall_txg;
zthr_t *spa_livelist_delete_zthr; /* deleting livelists */
zthr_t *spa_livelist_condense_zthr; /* condensing livelists */
uint64_t spa_livelists_to_delete; /* set of livelists to free */
livelist_condense_entry_t spa_to_condense; /* next to condense */
char *spa_root; /* alternate root directory */
uint64_t spa_ena; /* spa-wide ereport ENA */
int spa_last_open_failed; /* error if last open failed */
uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */
uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */
uint64_t spa_load_txg; /* ub txg that loaded */
uint64_t spa_load_txg_ts; /* timestamp from that ub */
uint64_t spa_load_meta_errors; /* verify metadata err count */
uint64_t spa_load_data_errors; /* verify data err count */
uint64_t spa_verify_min_txg; /* start txg of verify scrub */
kmutex_t spa_errlog_lock; /* error log lock */
uint64_t spa_errlog_last; /* last error log object */
uint64_t spa_errlog_scrub; /* scrub error log object */
kmutex_t spa_errlist_lock; /* error list/ereport lock */
avl_tree_t spa_errlist_last; /* last error list */
avl_tree_t spa_errlist_scrub; /* scrub error list */
avl_tree_t spa_errlist_healed; /* list of healed blocks */
uint64_t spa_deflate; /* should we deflate? */
uint64_t spa_history; /* history object */
kmutex_t spa_history_lock; /* history lock */
vdev_t *spa_pending_vdev; /* pending vdev additions */
kmutex_t spa_props_lock; /* property lock */
uint64_t spa_pool_props_object; /* object for properties */
uint64_t spa_bootfs; /* default boot filesystem */
uint64_t spa_failmode; /* failure mode for the pool */
uint64_t spa_deadman_failmode; /* failure mode for deadman */
uint64_t spa_delegation; /* delegation on/off */
list_t spa_config_list; /* previous cache file(s) */
/* per-CPU array of root of async I/O: */
zio_t **spa_async_zio_root;
zio_t *spa_suspend_zio_root; /* root of all suspended I/O */
zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
kmutex_t spa_suspend_lock; /* protects suspend_zio_root */
kcondvar_t spa_suspend_cv; /* notification of resume */
zio_suspend_reason_t spa_suspended; /* pool is suspended */
uint8_t spa_claiming; /* pool is doing zil_claim() */
boolean_t spa_is_root; /* pool is root */
int spa_minref; /* num refs when first opened */
spa_mode_t spa_mode; /* SPA_MODE_{READ|WRITE} */
boolean_t spa_read_spacemaps; /* spacemaps available if ro */
spa_log_state_t spa_log_state; /* log state */
uint64_t spa_autoexpand; /* lun expansion on/off */
ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
uint64_t spa_ddt_stat_object; /* DDT statistics */
uint64_t spa_dedup_dspace; /* Cache get_dedup_dspace() */
uint64_t spa_dedup_checksum; /* default dedup checksum */
uint64_t spa_dspace; /* dspace in normal class */
struct brt *spa_brt; /* in-core BRT */
kmutex_t spa_vdev_top_lock; /* dueling offline/remove */
kmutex_t spa_proc_lock; /* protects spa_proc* */
kcondvar_t spa_proc_cv; /* spa_proc_state transitions */
spa_proc_state_t spa_proc_state; /* see definition */
proc_t *spa_proc; /* "zpool-poolname" process */
uintptr_t spa_did; /* if procp != p0, did of t1 */
boolean_t spa_autoreplace; /* autoreplace set in open */
int spa_vdev_locks; /* locks grabbed */
uint64_t spa_creation_version; /* version at pool creation */
uint64_t spa_prev_software_version; /* See ub_software_version */
uint64_t spa_feat_for_write_obj; /* required to write to pool */
uint64_t spa_feat_for_read_obj; /* required to read from pool */
uint64_t spa_feat_desc_obj; /* Feature descriptions */
uint64_t spa_feat_enabled_txg_obj; /* Feature enabled txg */
kmutex_t spa_feat_stats_lock; /* protects spa_feat_stats */
nvlist_t *spa_feat_stats; /* Cache of enabled features */
/* cache feature refcounts */
uint64_t spa_feat_refcount_cache[SPA_FEATURES];
taskqid_t spa_deadman_tqid; /* Task id */
uint64_t spa_deadman_calls; /* number of deadman calls */
hrtime_t spa_sync_starttime; /* starting time of spa_sync */
uint64_t spa_deadman_synctime; /* deadman sync expiration */
uint64_t spa_deadman_ziotime; /* deadman zio expiration */
uint64_t spa_all_vdev_zaps; /* ZAP of per-vd ZAP obj #s */
spa_avz_action_t spa_avz_action; /* destroy/rebuild AVZ? */
uint64_t spa_autotrim; /* automatic background trim? */
uint64_t spa_errata; /* errata issues detected */
spa_stats_t spa_stats; /* assorted spa statistics */
spa_keystore_t spa_keystore; /* loaded crypto keys */
/* arc_memory_throttle() parameters during low memory condition */
uint64_t spa_lowmem_page_load; /* memory load during txg */
uint64_t spa_lowmem_last_txg; /* txg window start */
hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */
taskq_t *spa_zvol_taskq; /* Taskq for minor management */
taskq_t *spa_metaslab_taskq; /* Taskq for metaslab preload */
taskq_t *spa_prefetch_taskq; /* Taskq for prefetch threads */
taskq_t *spa_upgrade_taskq; /* Taskq for upgrade jobs */
uint64_t spa_multihost; /* multihost aware (mmp) */
mmp_thread_t spa_mmp; /* multihost mmp thread */
list_t spa_leaf_list; /* list of leaf vdevs */
uint64_t spa_leaf_list_gen; /* track leaf_list changes */
uint32_t spa_hostid; /* cached system hostid */
/* synchronization for threads in spa_wait */
kmutex_t spa_activities_lock;
kcondvar_t spa_activities_cv;
kcondvar_t spa_waiters_cv;
int spa_waiters; /* number of waiting threads */
boolean_t spa_waiters_cancel; /* waiters should return */
char *spa_compatibility; /* compatibility file(s) */
/*
* spa_refcount & spa_config_lock must be the last elements
* because zfs_refcount_t changes size based on compilation options.
* In order for the MDB module to function correctly, the other
* fields must remain in the same location.
*/
spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
zfs_refcount_t spa_refcount; /* number of opens */
};
extern char *spa_config_path;
extern const char *zfs_deadman_failmode;
extern uint_t spa_slop_shift;
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
const char *name);
extern void spa_event_post(sysevent_t *ev);
extern int param_set_deadman_failmode_common(const char *val);
extern void spa_set_deadman_synctime(hrtime_t ns);
extern void spa_set_deadman_ziotime(hrtime_t ns);
extern const char *spa_history_zone(void);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_IMPL_H */
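The one field added to struct spa is spa_aux_sync_uber, a flag that by its comment requests uberblock writes on auxiliary (spare/L2ARC) vdev labels. A speculative request/consume sketch; the real call sites live in the label sync path, not shown here:

static void
example_aux_uber(spa_t *spa)
{
        /* producer: some aux-vdev change wants uberblocks synced */
        spa->spa_aux_sync_uber = B_TRUE;

        /* consumer: label sync checks and clears the request */
        if (spa->spa_aux_sync_uber) {
                spa->spa_aux_sync_uber = B_FALSE;
                /* ... write uberblocks to spare/l2cache labels ... */
        }
}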
diff --git a/sys/contrib/openzfs/include/sys/zfs_vnops.h b/sys/contrib/openzfs/include/sys/zfs_vnops.h
index 5da103f17783..e60b99bed192 100644
--- a/sys/contrib/openzfs/include/sys/zfs_vnops.h
+++ b/sys/contrib/openzfs/include/sys/zfs_vnops.h
@@ -1,59 +1,62 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VNOPS_H
#define _SYS_FS_ZFS_VNOPS_H
+
#include <sys/zfs_vnops_os.h>
+extern int zfs_bclone_enabled;
+
extern int zfs_fsync(znode_t *, int, cred_t *);
extern int zfs_read(znode_t *, zfs_uio_t *, int, cred_t *);
extern int zfs_write(znode_t *, zfs_uio_t *, int, cred_t *);
extern int zfs_holey(znode_t *, ulong_t, loff_t *);
extern int zfs_access(znode_t *, int, int, cred_t *);
extern int zfs_clone_range(znode_t *, uint64_t *, znode_t *, uint64_t *,
uint64_t *, cred_t *);
extern int zfs_clone_range_replay(znode_t *, uint64_t, uint64_t, uint64_t,
const blkptr_t *, size_t);
extern int zfs_getsecattr(znode_t *, vsecattr_t *, int, cred_t *);
extern int zfs_setsecattr(znode_t *, vsecattr_t *, int, cred_t *);
extern int mappedread(znode_t *, int, zfs_uio_t *);
extern int mappedread_sf(znode_t *, int, zfs_uio_t *);
extern void update_pages(znode_t *, int64_t, int, objset_t *);
/*
* Platform code that asynchronously drops zp's inode / vnode_t.
*
* Asynchronous dropping ensures that the caller will never drop the
* last reference on an inode / vnode_t in the current context.
* Doing so while holding open a tx could result in a deadlock if
* the platform calls into filesystem again in the implementation
* of inode / vnode_t dropping (e.g. call from iput_final()).
*/
extern void zfs_zrele_async(znode_t *zp);
extern zil_get_data_t zfs_get_data;
#endif
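zfs_bclone_enabled is the new tunable exported here, sitting next to zfs_clone_range(), which it plausibly gates. A sketch of such a guard; the exact errno is an assumption:

static int
example_bclone_gate(void)
{
        if (!zfs_bclone_enabled)
                return (SET_ERROR(EOPNOTSUPP)); /* errno assumed */
        return (0);
}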
diff --git a/sys/contrib/openzfs/lib/libspl/include/assert.h b/sys/contrib/openzfs/lib/libspl/include/assert.h
index c5bf0f0cc8f1..af4957dfbaa6 100644
--- a/sys/contrib/openzfs/lib/libspl/include/assert.h
+++ b/sys/contrib/openzfs/lib/libspl/include/assert.h
@@ -1,162 +1,165 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include_next <assert.h>
#ifndef _LIBSPL_ASSERT_H
#define _LIBSPL_ASSERT_H
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/types.h>
/* Workaround for non-Clang compilers */
#ifndef __has_feature
#define __has_feature(x) 0
#endif
/* We need to work around libspl_set_assert_ok() that we have for zdb */
#if __has_feature(attribute_analyzer_noreturn) || defined(__COVERITY__)
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#endif
/* Set to non-zero to avoid abort()ing on an assertion failure */
extern void libspl_set_assert_ok(boolean_t val);
/* printf version of libspl_assert */
extern void libspl_assertf(const char *file, const char *func, int line,
const char *format, ...) NORETURN __attribute__((format(printf, 4, 5)));
static inline int
libspl_assert(const char *buf, const char *file, const char *func, int line)
{
libspl_assertf(file, func, line, "%s", buf);
return (0);
}
#ifdef verify
#undef verify
#endif
+#define PANIC(fmt, a...) \
+ libspl_assertf(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
+
#define VERIFY(cond) \
(void) ((!(cond)) && \
libspl_assert(#cond, __FILE__, __FUNCTION__, __LINE__))
#define verify(cond) \
(void) ((!(cond)) && \
libspl_assert(#cond, __FILE__, __FUNCTION__, __LINE__))
#define VERIFY3B(LEFT, OP, RIGHT) \
do { \
const boolean_t __left = (boolean_t)(LEFT); \
const boolean_t __right = (boolean_t)(RIGHT); \
if (!(__left OP __right)) \
libspl_assertf(__FILE__, __FUNCTION__, __LINE__, \
"%s %s %s (0x%llx %s 0x%llx)", #LEFT, #OP, #RIGHT, \
(u_longlong_t)__left, #OP, (u_longlong_t)__right); \
} while (0)
#define VERIFY3S(LEFT, OP, RIGHT) \
do { \
const int64_t __left = (int64_t)(LEFT); \
const int64_t __right = (int64_t)(RIGHT); \
if (!(__left OP __right)) \
libspl_assertf(__FILE__, __FUNCTION__, __LINE__, \
"%s %s %s (0x%llx %s 0x%llx)", #LEFT, #OP, #RIGHT, \
(u_longlong_t)__left, #OP, (u_longlong_t)__right); \
} while (0)
#define VERIFY3U(LEFT, OP, RIGHT) \
do { \
const uint64_t __left = (uint64_t)(LEFT); \
const uint64_t __right = (uint64_t)(RIGHT); \
if (!(__left OP __right)) \
libspl_assertf(__FILE__, __FUNCTION__, __LINE__, \
"%s %s %s (0x%llx %s 0x%llx)", #LEFT, #OP, #RIGHT, \
(u_longlong_t)__left, #OP, (u_longlong_t)__right); \
} while (0)
#define VERIFY3P(LEFT, OP, RIGHT) \
do { \
const uintptr_t __left = (uintptr_t)(LEFT); \
const uintptr_t __right = (uintptr_t)(RIGHT); \
if (!(__left OP __right)) \
libspl_assertf(__FILE__, __FUNCTION__, __LINE__, \
"%s %s %s (0x%llx %s 0x%llx)", #LEFT, #OP, #RIGHT, \
(u_longlong_t)__left, #OP, (u_longlong_t)__right); \
} while (0)
#define VERIFY0(LEFT) \
do { \
const uint64_t __left = (uint64_t)(LEFT); \
if (!(__left == 0)) \
libspl_assertf(__FILE__, __FUNCTION__, __LINE__, \
"%s == 0 (0x%llx == 0)", #LEFT, \
(u_longlong_t)__left); \
} while (0)
#ifdef assert
#undef assert
#endif
#ifdef NDEBUG
#define ASSERT3B(x, y, z) \
((void) sizeof ((uintptr_t)(x)), (void) sizeof ((uintptr_t)(z)))
#define ASSERT3S(x, y, z) \
((void) sizeof ((uintptr_t)(x)), (void) sizeof ((uintptr_t)(z)))
#define ASSERT3U(x, y, z) \
((void) sizeof ((uintptr_t)(x)), (void) sizeof ((uintptr_t)(z)))
#define ASSERT3P(x, y, z) \
((void) sizeof ((uintptr_t)(x)), (void) sizeof ((uintptr_t)(z)))
#define ASSERT0(x) ((void) sizeof ((uintptr_t)(x)))
#define ASSERT(x) ((void) sizeof ((uintptr_t)(x)))
#define assert(x) ((void) sizeof ((uintptr_t)(x)))
#define IMPLY(A, B) \
((void) sizeof ((uintptr_t)(A)), (void) sizeof ((uintptr_t)(B)))
#define EQUIV(A, B) \
((void) sizeof ((uintptr_t)(A)), (void) sizeof ((uintptr_t)(B)))
#else
#define ASSERT3B VERIFY3B
#define ASSERT3S VERIFY3S
#define ASSERT3U VERIFY3U
#define ASSERT3P VERIFY3P
#define ASSERT0 VERIFY0
#define ASSERT VERIFY
#define assert VERIFY
#define IMPLY(A, B) \
((void)(((!(A)) || (B)) || \
libspl_assert("(" #A ") implies (" #B ")", \
__FILE__, __FUNCTION__, __LINE__)))
#define EQUIV(A, B) \
((void)((!!(A) == !!(B)) || \
libspl_assert("(" #A ") is equivalent to (" #B ")", \
__FILE__, __FUNCTION__, __LINE__)))
#endif /* NDEBUG */
#endif /* _LIBSPL_ASSERT_H */
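This hunk adds a userland PANIC(fmt, ...) that funnels into libspl_assertf(), alongside the existing VERIFY*/ASSERT* family. A usage sketch with illustrative locals:

static void
example_asserts(int rc, uint64_t size)
{
        VERIFY3U(size, >, 0);   /* prints both operands on failure */
        VERIFY0(rc);            /* asserts rc == 0 */
        if (size > (1ULL << 24))
                PANIC("unexpectedly large size %llu", (u_longlong_t)size);
}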
diff --git a/sys/contrib/openzfs/lib/libspl/include/os/freebsd/sys/stat.h b/sys/contrib/openzfs/lib/libspl/include/os/freebsd/sys/stat.h
index 88773cceb951..af488244bd05 100644
--- a/sys/contrib/openzfs/lib/libspl/include/os/freebsd/sys/stat.h
+++ b/sys/contrib/openzfs/lib/libspl/include/os/freebsd/sys/stat.h
@@ -1,85 +1,89 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _LIBSPL_SYS_STAT_H
#define _LIBSPL_SYS_STAT_H
#include_next <sys/stat.h>
/* Note: this file can be used on Linux/macOS when bootstrapping tools. */
#if defined(__FreeBSD__)
#include <sys/mount.h> /* for BLKGETSIZE64 */
#define stat64 stat
#define MAXOFFSET_T OFF_MAX
#ifndef _KERNEL
#include <sys/disk.h>
static __inline int
fstat64(int fd, struct stat *sb)
{
int ret;
ret = fstat(fd, sb);
if (ret == 0) {
if (S_ISCHR(sb->st_mode))
(void) ioctl(fd, DIOCGMEDIASIZE, &sb->st_size);
}
return (ret);
}
#endif
/*
* Emulate Solaris' behavior of returning the block device size in fstat64().
*/
static inline int
fstat64_blk(int fd, struct stat64 *st)
{
if (fstat64(fd, st) == -1)
return (-1);
/* In Linux we need to use an ioctl to get the size of a block device */
if (S_ISBLK(st->st_mode)) {
if (ioctl(fd, BLKGETSIZE64, &st->st_size) != 0)
return (-1);
}
return (0);
}
#endif /* defined(__FreeBSD__) */
/*
* Only Intel-based Macs have a separate stat64; Arm-based Macs are like
* FreeBSD and have a full 64-bit stat from the start.
+ *
+ * On Linux, musl libc is full 64-bit too and has deprecated its own version
+ * of these defines since version 1.2.4.
*/
-#if defined(__APPLE__) && !(defined(__i386__) || defined(__x86_64__))
+#if (defined(__APPLE__) && !(defined(__i386__) || defined(__x86_64__))) || \
+ (defined(__linux__) && !defined(__GLIBC__))
#define stat64 stat
#define fstat64 fstat
#endif
#endif /* _LIBSPL_SYS_STAT_H */
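The change above extends the stat64 aliasing to musl-based Linux; the FreeBSD branch already provides fstat64_blk() to report a device's media size. Illustrative use of that shim (error handling simplified):

#include <fcntl.h>
#include <unistd.h>

static int64_t
example_device_size(const char *path)
{
        struct stat64 st;
        int fd = open(path, O_RDONLY);

        if (fd == -1)
                return (-1);
        if (fstat64_blk(fd, &st) == -1)
                st.st_size = -1;        /* size unavailable */
        (void) close(fd);
        return (st.st_size);
}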
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/uio.h b/sys/contrib/openzfs/lib/libspl/include/sys/uio.h
index e9e21819d4f8..665bfc42301b 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/uio.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/uio.h
@@ -1,112 +1,112 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* University Copyright- Copyright (c) 1982, 1986, 1988
* The Regents of the University of California
* All Rights Reserved
*
* University Acknowledgment- Portions of this document are derived from
* software developed by the University of California, Berkeley, and its
* contributors.
*/
#ifndef _LIBSPL_SYS_UIO_H
#define _LIBSPL_SYS_UIO_H
#include <sys/types.h>
#include_next <sys/uio.h>
#ifdef __APPLE__
#include <sys/_types/_iovec_t.h>
#endif
#include <stdint.h>
typedef struct iovec iovec_t;
#if defined(__linux__) || defined(__APPLE__)
typedef enum zfs_uio_rw {
UIO_READ = 0,
UIO_WRITE = 1,
} zfs_uio_rw_t;
typedef enum zfs_uio_seg {
UIO_USERSPACE = 0,
UIO_SYSSPACE = 1,
} zfs_uio_seg_t;
#elif defined(__FreeBSD__)
typedef enum uio_seg zfs_uio_seg_t;
#endif
typedef struct zfs_uio {
struct iovec *uio_iov; /* pointer to array of iovecs */
int uio_iovcnt; /* number of iovecs */
offset_t uio_loffset; /* file offset */
zfs_uio_seg_t uio_segflg; /* address space (kernel or user) */
uint16_t uio_fmode; /* file mode flags */
uint16_t uio_extflg; /* extended flags */
ssize_t uio_resid; /* residual count */
} zfs_uio_t;
#define zfs_uio_segflg(uio) (uio)->uio_segflg
#define zfs_uio_offset(uio) (uio)->uio_loffset
#define zfs_uio_resid(uio) (uio)->uio_resid
#define zfs_uio_iovcnt(uio) (uio)->uio_iovcnt
#define zfs_uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
static inline void
zfs_uio_iov_at_index(zfs_uio_t *uio, uint_t idx, void **base, uint64_t *len)
{
*base = zfs_uio_iovbase(uio, idx);
*len = zfs_uio_iovlen(uio, idx);
}
static inline void
-zfs_uio_advance(zfs_uio_t *uio, size_t size)
+zfs_uio_advance(zfs_uio_t *uio, ssize_t size)
{
uio->uio_resid -= size;
uio->uio_loffset += size;
}
static inline offset_t
zfs_uio_index_at_offset(zfs_uio_t *uio, offset_t off, uint_t *vec_idx)
{
*vec_idx = 0;
while (*vec_idx < (uint_t)zfs_uio_iovcnt(uio) &&
off >= (offset_t)zfs_uio_iovlen(uio, *vec_idx)) {
off -= zfs_uio_iovlen(uio, *vec_idx);
(*vec_idx)++;
}
return (off);
}
#endif /* _LIBSPL_SYS_UIO_H */
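zfs_uio_advance() changes from size_t to ssize_t, presumably so callers can advance by a signed (possibly negative) amount. A small sketch tying the inline helpers together; everything used is defined above:

static void
example_uio_lookup(zfs_uio_t *uio, offset_t off)
{
        uint_t idx;
        void *base;
        uint64_t len;

        /* find the iovec containing byte "off"; returns offset within it */
        offset_t skip = zfs_uio_index_at_offset(uio, off, &idx);

        zfs_uio_iov_at_index(uio, idx, &base, &len);
        /* byte "off" of the uio lives at (char *)base + skip */
        (void) ((char *)base + skip);
}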
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index 2d612a16b227..9bb8f6a47de1 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -1,9427 +1,9497 @@
<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libzfs.so.4'>
<elf-needed>
<dependency name='libzfs_core.so.3'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libuuid.so.1'/>
<dependency name='libblkid.so.1'/>
<dependency name='libudev.so.1'/>
<dependency name='libuutil.so.3'/>
<dependency name='libm.so.6'/>
<dependency name='libcrypto.so.3'/>
<dependency name='libz.so.1'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='bookmark_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='cityhash4' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_nestcheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_read' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_err_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_rescan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_use_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_write' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='entity_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_impl_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fsleep' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mpath_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libpc_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_set_assert_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_add_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_envvar_is_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_errno' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_action' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_free_str_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_cache' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_print_on_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout_nopath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_sync' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mountpoint_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='permset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='pool_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='printf_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_disable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_enable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_errorstr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_truncate_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_validate_shareopts' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='snapshot_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_abandon' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_dispatch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_member' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspend' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='update_vdev_config_dev_sysfs_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='use_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_adjust_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_allocatable_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_append_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_basename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_bookmark_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_component_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create_ancestors' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_attempt_load_keys' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_clone_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_get_encryption_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_rewrap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_name_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_canonicalize_perm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_verify_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_whokey' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_flush' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_dm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_devid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_physical' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dirnamelen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_foreach_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_all_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_clones_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_enclosure_sysfs_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_recvd_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_user_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_handle_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_delegation_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicebytes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum_format' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_niceraw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicestrtonum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicetime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parent_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_path_to_zhandle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_delegatable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_encryption_key_param' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_recvd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inherit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inheritable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_is_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list_flags' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_keylocation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_visible' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prune_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_refresh_properties' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_resolve_shortname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_save_arguments' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_progress' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume_token_to_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_saved' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_set_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_setproctitle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_setproctitle_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_show_diffs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_purge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_special_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strcmp_pathname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_truncate_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_type_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmountall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userns' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_default_search_paths' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_volume_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_discard_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_disk_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_dump_ddt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_enable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_seek' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_explain_recover' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export_force' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_all_vdev_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_errlog' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_features' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_load_policy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state_str' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_userprop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_vdev_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_vdev_prop_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_getenv_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_history_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_in_use' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_is_draid_spare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_load_compat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_log_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path_ds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prepare_and_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prepare_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_unsupported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_search_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_vdev_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_skip_pool' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_sync_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_upgrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_attach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_degrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_detach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_fault' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_indirect_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_offline' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_online' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_path_to_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_cancel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_wanted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_script_alloc_env' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_script_free_env' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_vdev_set_removed_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_impl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_number' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_width' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zvol_volsize_to_reservation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='efi_debug' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_abd_ops' size='24' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx2_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512bw_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512f_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_sse2_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_ssse3_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar4_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='2184' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
<abi-instr address-size='64' path='lib/libefi/rdwr_efi.c' language='LANG_C99'>
<typedef-decl name='uInt' type-id='f0981eeb' id='09110a74'/>
<var-decl name='efi_debug' type-id='95e97e5e' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
<function-decl name='uuid_generate' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='cf536864'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uuid_is_null' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='354f7eb9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='crc32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5bbcce85'/>
<parameter type-id='e8cb3e0e'/>
<parameter type-id='09110a74'/>
<return type-id='5bbcce85'/>
</function-decl>
<function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
<parameter type-id='0d8119a8' name='vtoc'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/libshare.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='128' id='2d6895a3'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<var-decl name='sa_protocol_names' type-id='2d6895a3' mangled-name='sa_protocol_names' visibility='default' elf-symbol-id='sa_protocol_names'/>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/nfs.c' language='LANG_C99'>
<function-decl name='rename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='memchr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='flock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fchmod' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='e1c52942'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='mkdir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='e1c52942'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/os/linux/nfs.c' language='LANG_C99'>
<class-decl name='sa_share_impl' size-in-bits='192' is-struct='yes' visibility='default' id='72b09bf8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='sa_zfsname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sa_mountpoint' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='sa_shareopts' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_share_impl_t' type-id='946a2c6b' id='a48b47d0'/>
<class-decl name='sa_fstype_t' size-in-bits='384' is-struct='yes' naming-typedef-id='639af739' visibility='default' id='944afa86'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='enable_share' type-id='2f78a9c1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='disable_share' type-id='2f78a9c1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='is_shared' type-id='81020bc2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='validate_shareopts' type-id='f194a8fb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='commit_shares' type-id='797ee7da' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='truncate_shares' type-id='5d51038b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_fstype_t' type-id='944afa86' id='639af739'/>
<qualified-type-def type-id='639af739' const='yes' id='d19dbca9'/>
<qualified-type-def type-id='72b09bf8' const='yes' id='484950e3'/>
<pointer-type-def type-id='484950e3' size-in-bits='64' id='946a2c6b'/>
<pointer-type-def type-id='276427e1' size-in-bits='64' id='1db260e5'/>
<qualified-type-def type-id='1db260e5' const='yes' id='797ee7da'/>
<pointer-type-def type-id='5113b296' size-in-bits='64' id='70487b28'/>
<qualified-type-def type-id='70487b28' const='yes' id='f194a8fb'/>
<pointer-type-def type-id='c13578bc' size-in-bits='64' id='fa1f29ce'/>
<qualified-type-def type-id='fa1f29ce' const='yes' id='2f78a9c1'/>
<pointer-type-def type-id='723e6cf2' size-in-bits='64' id='1d99e49c'/>
<pointer-type-def type-id='86373eb1' size-in-bits='64' id='f337456d'/>
<qualified-type-def type-id='f337456d' const='yes' id='81020bc2'/>
<qualified-type-def type-id='953b12f8' const='yes' id='5d51038b'/>
<var-decl name='libshare_nfs_type' type-id='d19dbca9' visibility='default'/>
<function-decl name='nfs_escape_mountpoint' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nfs_is_shared_impl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='a48b47d0'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nfs_toggle_share' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='a48b47d0'/>
<parameter type-id='1d99e49c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nfs_reset_shares' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='276427e1'>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='5113b296'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='c13578bc'>
<parameter type-id='a48b47d0'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='723e6cf2'>
<parameter type-id='a48b47d0'/>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='86373eb1'>
<parameter type-id='a48b47d0'/>
<return type-id='c19b74c3'/>
</function-type>
<function-type size-in-bits='64' id='ee076206'>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libshare/os/linux/smb.c' language='LANG_C99'>
<var-decl name='libshare_smb_type' type-id='d19dbca9' visibility='default'/>
<function-decl name='__fgets_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='e75a27e9'/>
<return type-id='26a90f95'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/assert.c' language='LANG_C99'>
<function-decl name='libspl_set_assert_ok' mangled-name='libspl_set_assert_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_set_assert_ok'>
<parameter type-id='c19b74c3' name='val'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/atomic.c' language='LANG_C99'>
<typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
<typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
<qualified-type-def type-id='149c6638' volatile='yes' id='5120c5f7'/>
<pointer-type-def type-id='5120c5f7' size-in-bits='64' id='93977ae7'/>
<qualified-type-def type-id='b96825af' volatile='yes' id='84ff7d66'/>
<pointer-type-def type-id='84ff7d66' size-in-bits='64' id='aa323ea4'/>
<qualified-type-def type-id='ee1f298e' volatile='yes' id='6f7e09cb'/>
<pointer-type-def type-id='6f7e09cb' size-in-bits='64' id='64698d33'/>
<function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
<parameter type-id='64698d33' name='target'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='79a0948f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='ee31ee44' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='23bd8cb5' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='3ff5601b' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='bd54fe1a' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='bits'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='exp'/>
<parameter type-id='eaa32e2f' name='des'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='exp'/>
<parameter type-id='b96825af' name='des'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='exp'/>
<parameter type-id='149c6638' name='des'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
<parameter type-id='3a147f31' name='target'/>
<parameter type-id='8f92235e' name='exp'/>
<parameter type-id='8f92235e' name='des'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='exp'/>
<parameter type-id='ee1f298e' name='des'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
<parameter type-id='aa323ea4' name='target'/>
<parameter type-id='b96825af' name='bits'/>
<return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
<parameter type-id='93977ae7' name='target'/>
<parameter type-id='149c6638' name='bits'/>
<return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='ee1f298e' name='bits'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
<parameter type-id='fe09dd29' name='target'/>
<parameter type-id='eaa32e2f' name='bits'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
<parameter type-id='64698d33' name='target'/>
<parameter type-id='3502e3ff' name='value'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/getexecname.c' language='LANG_C99'>
<function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<return type-id='79a0948f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/list.c' language='LANG_C99'>
<typedef-decl name='list_node_t' type-id='b0b5e45e' id='b21843b2'/>
<typedef-decl name='list_t' type-id='e824dae9' id='0899125f'/>
<class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='b0b5e45e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='next' type-id='b03eadb4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='prev' type-id='b03eadb4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='e824dae9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='list_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='list_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='list_head' type-id='b0b5e45e' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='b0b5e45e' size-in-bits='64' id='b03eadb4'/>
<pointer-type-def type-id='b21843b2' size-in-bits='64' id='ccc38265'/>
<pointer-type-def type-id='0899125f' size-in-bits='64' id='352ec160'/>
<function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='b59d7dce' name='offset'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
<parameter type-id='352ec160' name='list'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<parameter type-id='eaa32e2f' name='nobject'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
<parameter type-id='352ec160' name='list'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
<parameter type-id='352ec160' name='list'/>
<parameter type-id='eaa32e2f' name='object'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
<parameter type-id='352ec160' name='dst'/>
<parameter type-id='352ec160' name='src'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
<parameter type-id='ccc38265' name='lold'/>
<parameter type-id='ccc38265' name='lnew'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
<parameter type-id='ccc38265' name='ln'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
<parameter type-id='352ec160' name='list'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/mkdirp.c' language='LANG_C99'>
<typedef-decl name='wchar_t' type-id='95e97e5e' id='928221d2'/>
<qualified-type-def type-id='928221d2' const='yes' id='effb3702'/>
<pointer-type-def type-id='effb3702' size-in-bits='64' id='f077d3f8'/>
<qualified-type-def type-id='f077d3f8' restrict='yes' id='598aab80'/>
<pointer-type-def type-id='928221d2' size-in-bits='64' id='323d93c1'/>
<qualified-type-def type-id='323d93c1' restrict='yes' id='f1358bc3'/>
<function-decl name='__mbstowcs_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f1358bc3'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='__wcstombs_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='598aab80'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
<pointer-type-def type-id='56fe4a37' size-in-bits='64' id='b6b61d2f'/>
<qualified-type-def type-id='b6b61d2f' restrict='yes' id='3cad23cd'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
<parameter type-id='266fe297'/>
<parameter type-id='95e97e5e'/>
<return type-id='b6b61d2f'/>
</function-decl>
<function-decl name='feof' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/timestamp.c' language='LANG_C99'>
<typedef-decl name='nl_item' type-id='95e97e5e' id='03b79a94'/>
<function-decl name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='03b79a94'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
<parameter type-id='3502e3ff' name='timestamp_fmt'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libtpool/thread_pool.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='8901473c' size-in-bits='576' id='f5da478b'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='49ef3ffd' size-in-bits='1024' id='a14403f5'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='384' id='36d7f119'>
<subrange length='48' type-id='7359adad' id='8f6d2a81'/>
</array-type-def>
<array-type-def dimensions='1' type-id='bd54fe1a' size-in-bits='512' id='5d4efd44'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<array-type-def dimensions='1' type-id='f0981eeb' size-in-bits='64' id='0d532ec1'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='eaa32e2f' size-in-bits='256' id='209ef23f'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='__cancel_jmp_buf_tag' size-in-bits='576' is-struct='yes' visibility='default' id='8901473c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__cancel_jmp_buf' type-id='379a1ab7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='__mask_was_saved' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__pthread_unwind_buf_t' size-in-bits='832' is-struct='yes' naming-typedef-id='4423cf7f' visibility='default' id='a0abc656'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__cancel_jmp_buf' type-id='f5da478b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='__pad' type-id='209ef23f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__pthread_unwind_buf_t' type-id='a0abc656' id='4423cf7f'/>
<union-decl name='__atomic_wide_counter' size-in-bits='64' naming-typedef-id='f3b40860' visibility='default' id='613ce450'>
<data-member access='public'>
<var-decl name='__value64' type-id='3a47d82b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__value32' type-id='e7f43f72' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f72'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__low' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__high' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__atomic_wide_counter' type-id='613ce450' id='f3b40860'/>
<typedef-decl name='__cpu_mask' type-id='7359adad' id='49ef3ffd'/>
<class-decl name='cpu_set_t' size-in-bits='1024' is-struct='yes' naming-typedef-id='8037c762' visibility='default' id='1f20d231'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__bits' type-id='a14403f5' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='cpu_set_t' type-id='1f20d231' id='8037c762'/>
<union-decl name='pthread_condattr_t' size-in-bits='32' naming-typedef-id='836265dd' visibility='default' id='33dd3aad'>
<data-member access='public'>
<var-decl name='__size' type-id='8e0573fd' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='95e97e5e' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_condattr_t' type-id='33dd3aad' id='836265dd'/>
<union-decl name='pthread_cond_t' size-in-bits='384' naming-typedef-id='62fab762' visibility='default' id='cbb12c12'>
<data-member access='public'>
<var-decl name='__data' type-id='c987b47c' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36d7f119' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='1eb56b1e' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_cond_t' type-id='cbb12c12' id='62fab762'/>
<typedef-decl name='__jmp_buf' type-id='5d4efd44' id='379a1ab7'/>
<class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='c987b47c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__wseq' type-id='f3b40860' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__g1_start' type-id='f3b40860' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__g_refs' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__g_size' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='__g1_orig_size' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__wrefs' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='__g_signals' type-id='0d532ec1' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='sched_param' size-in-bits='32' is-struct='yes' visibility='default' id='0897719a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='sched_priority' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='tpool_job_t' type-id='3b8579e5' id='66a0afc9'/>
<class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='3b8579e5'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tpj_next' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tpj_func' type-id='b7f9d8e6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tpj_arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='tpool_active_t' type-id='c8d086f4' id='6fcda10e'/>
<class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='c8d086f4'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tpa_next' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tpa_tid' type-id='4051f5e7' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='8901473c' size-in-bits='64' id='eb91b7ea'/>
<pointer-type-def type-id='4423cf7f' size-in-bits='64' id='ba7c727c'/>
<pointer-type-def type-id='b9c97942' size-in-bits='64' id='bbf06c47'/>
<qualified-type-def type-id='bbf06c47' restrict='yes' id='65e6ec45'/>
<qualified-type-def type-id='b9c97942' const='yes' id='191f6b72'/>
<pointer-type-def type-id='191f6b72' size-in-bits='64' id='e475fb88'/>
<qualified-type-def type-id='e475fb88' restrict='yes' id='5a8729d0'/>
<qualified-type-def type-id='8037c762' const='yes' id='f50ea9b2'/>
<pointer-type-def type-id='f50ea9b2' size-in-bits='64' id='5e14fa48'/>
<qualified-type-def type-id='836265dd' const='yes' id='7d24c58d'/>
<pointer-type-def type-id='7d24c58d' size-in-bits='64' id='a7e325e5'/>
<qualified-type-def type-id='a7e325e5' restrict='yes' id='4c428e67'/>
<qualified-type-def type-id='0897719a' const='yes' id='c4a7b189'/>
<pointer-type-def type-id='c4a7b189' size-in-bits='64' id='36fca399'/>
<qualified-type-def type-id='36fca399' restrict='yes' id='37e4897b'/>
- <qualified-type-def type-id='a9c79a1f' const='yes' id='cd087e36'/>
- <pointer-type-def type-id='cd087e36' size-in-bits='64' id='e05e8614'/>
<qualified-type-def type-id='e05e8614' restrict='yes' id='0be2e71c'/>
<pointer-type-def type-id='8037c762' size-in-bits='64' id='d74a6869'/>
<qualified-type-def type-id='7292109c' restrict='yes' id='6942f6a4'/>
<qualified-type-def type-id='7347a39e' restrict='yes' id='578ba182'/>
<pointer-type-def type-id='62fab762' size-in-bits='64' id='db285b03'/>
<qualified-type-def type-id='db285b03' restrict='yes' id='2a468b41'/>
<qualified-type-def type-id='18c91f9e' restrict='yes' id='6e745582'/>
<pointer-type-def type-id='0897719a' size-in-bits='64' id='23cbcb08'/>
<qualified-type-def type-id='23cbcb08' restrict='yes' id='b09b2050'/>
<pointer-type-def type-id='6fcda10e' size-in-bits='64' id='ad33e5e7'/>
<pointer-type-def type-id='66a0afc9' size-in-bits='64' id='f32b30e4'/>
<qualified-type-def type-id='63e171df' restrict='yes' id='9e7a3a7d'/>
<function-decl name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
<return type-id='4051f5e7'/>
</function-decl>
<function-decl name='pthread_attr_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getdetachstate' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='540db505'/>
<parameter type-id='7292109c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setdetachstate' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getguardsize' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='540db505'/>
<parameter type-id='78c01427'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setguardsize' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getschedparam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e1815e87'/>
<parameter type-id='b09b2050'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setschedparam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='578ba182'/>
<parameter type-id='37e4897b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getschedpolicy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e1815e87'/>
<parameter type-id='6942f6a4'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setschedpolicy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getinheritsched' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e1815e87'/>
<parameter type-id='6942f6a4'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setinheritsched' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getscope' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e1815e87'/>
<parameter type-id='6942f6a4'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setscope' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getstack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e1815e87'/>
<parameter type-id='9e7a3a7d'/>
<parameter type-id='d19b2c25'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setstack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_setaffinity_np' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7347a39e'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='5e14fa48'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_attr_getaffinity_np' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='540db505'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='d74a6869'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_setcancelstate' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='7292109c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_setcanceltype' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='7292109c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__pthread_register_cancel' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='ba7c727c'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__pthread_unregister_cancel' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='ba7c727c'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__pthread_unwind_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='ba7c727c'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='pthread_cond_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='2a468b41'/>
<parameter type-id='4c428e67'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cond_signal' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='db285b03'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cond_broadcast' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='db285b03'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cond_wait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='2a468b41'/>
<parameter type-id='6e745582'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cond_timedwait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='2a468b41'/>
<parameter type-id='6e745582'/>
<parameter type-id='0be2e71c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__sysconf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='bd54fe1a'/>
</function-decl>
<function-decl name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='5a8729d0'/>
<parameter type-id='65e6ec45'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_abandon' mangled-name='tpool_abandon' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_abandon'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_suspend' mangled-name='tpool_suspend' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspend'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_suspended' mangled-name='tpool_suspended' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspended'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_resume' mangled-name='tpool_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_resume'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_member' mangled-name='tpool_member' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_member'>
<parameter type-id='9cf59a50' name='tpool'/>
<return type-id='95e97e5e'/>
</function-decl>
<pointer-type-def type-id='b1bbf10d' size-in-bits='64' id='9cf59a50'/>
<typedef-decl name='tpool_t' type-id='88d1b7f9' id='b1bbf10d'/>
<class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='88d1b7f9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tp_forw' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tp_back' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tp_mutex' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='tp_busycv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='tp_workcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='tp_waitcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1600'>
<var-decl name='tp_active' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
<var-decl name='tp_head' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1728'>
<var-decl name='tp_tail' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
<var-decl name='tp_attr' type-id='7d8569fd' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='tp_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2272'>
<var-decl name='tp_linger' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='tp_njobs' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2336'>
<var-decl name='tp_minimum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='tp_maximum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2400'>
<var-decl name='tp_current' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='tp_idle' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<function-type size-in-bits='64' id='c5c76c9c'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_changelist.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<type-decl name='char' size-in-bits='8' id='a84c031d'/>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8192' id='b54ce520'>
<subrange length='1024' type-id='7359adad' id='c60446f8'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2048' id='d1617432'>
<subrange length='256' type-id='7359adad' id='36e5b9fa'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
<subrange length='40' type-id='7359adad' id='8f80b239'/>
</array-type-def>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='e70a39e3'/>
<type-decl name='int' size-in-bits='32' id='95e97e5e'/>
<type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
<type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<type-decl name='short int' size-in-bits='16' id='a2185560'/>
<array-type-def dimensions='1' type-id='e475ab95' size-in-bits='192' id='0ce65a8b'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
<type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
<type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<type-decl name='void' id='48b5725f'/>
<typedef-decl name='uu_compare_fn_t' type-id='add6e811' id='40f93560'/>
<typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
<typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
<class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='f65f4326'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uan_opaque' type-id='0ce65a8b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uu_avl_node_t' type-id='f65f4326' id='73a65116'/>
<typedef-decl name='uu_avl_walk_t' type-id='e70a39e3' id='edd8457b'/>
<typedef-decl name='uu_avl_index_t' type-id='e475ab95' id='5d7f5fc8'/>
<typedef-decl name='zfs_handle_t' type-id='f6ee4445' id='775509eb'/>
<typedef-decl name='zpool_handle_t' type-id='67002a8a' id='b1efc708'/>
<typedef-decl name='libzfs_handle_t' type-id='c8a9d9d8' id='95942d0c'/>
<typedef-decl name='zfs_iter_f' type-id='5571cde4' id='d8e49ab9'/>
<typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
<class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='098f0221'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dds_num_clones' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='dds_creation_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='dds_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='dds_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dds_is_snapshot' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='232'>
<var-decl name='dds_inconsistent' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='240'>
<var-decl name='dds_redacted' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='248'>
<var-decl name='dds_origin' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='dmu_objset_stats_t' type-id='098f0221' id='b2c14f17'/>
<enum-decl name='zfs_type_t' naming-typedef-id='2e45de5d' id='5d8f7321'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_TYPE_INVALID' value='0'/>
<enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
<enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_TYPE_VOLUME' value='4'/>
<enumerator name='ZFS_TYPE_POOL' value='8'/>
<enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
<enumerator name='ZFS_TYPE_VDEV' value='32'/>
</enum-decl>
<typedef-decl name='zfs_type_t' type-id='5d8f7321' id='2e45de5d'/>
<enum-decl name='dmu_objset_type' id='6b1b19f9'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DMU_OST_NONE' value='0'/>
<enumerator name='DMU_OST_META' value='1'/>
<enumerator name='DMU_OST_ZFS' value='2'/>
<enumerator name='DMU_OST_ZVOL' value='3'/>
<enumerator name='DMU_OST_OTHER' value='4'/>
<enumerator name='DMU_OST_ANY' value='5'/>
<enumerator name='DMU_OST_NUMTYPES' value='6'/>
</enum-decl>
<typedef-decl name='dmu_objset_type_t' type-id='6b1b19f9' id='230f1e16'/>
<enum-decl name='zfs_prop_t' naming-typedef-id='58603c44' id='4b000d60'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_CONT' value='-2'/>
<enumerator name='ZPROP_INVAL' value='-1'/>
<enumerator name='ZPROP_USERPROP' value='-1'/>
<enumerator name='ZFS_PROP_TYPE' value='0'/>
<enumerator name='ZFS_PROP_CREATION' value='1'/>
<enumerator name='ZFS_PROP_USED' value='2'/>
<enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
<enumerator name='ZFS_PROP_REFERENCED' value='4'/>
<enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
<enumerator name='ZFS_PROP_MOUNTED' value='6'/>
<enumerator name='ZFS_PROP_ORIGIN' value='7'/>
<enumerator name='ZFS_PROP_QUOTA' value='8'/>
<enumerator name='ZFS_PROP_RESERVATION' value='9'/>
<enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
<enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
<enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
<enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
<enumerator name='ZFS_PROP_SHARENFS' value='14'/>
<enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
<enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
<enumerator name='ZFS_PROP_ATIME' value='17'/>
<enumerator name='ZFS_PROP_DEVICES' value='18'/>
<enumerator name='ZFS_PROP_EXEC' value='19'/>
<enumerator name='ZFS_PROP_SETUID' value='20'/>
<enumerator name='ZFS_PROP_READONLY' value='21'/>
<enumerator name='ZFS_PROP_ZONED' value='22'/>
<enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
<enumerator name='ZFS_PROP_ACLMODE' value='24'/>
<enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
<enumerator name='ZFS_PROP_CREATETXG' value='26'/>
<enumerator name='ZFS_PROP_NAME' value='27'/>
<enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
<enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
<enumerator name='ZFS_PROP_XATTR' value='30'/>
<enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
<enumerator name='ZFS_PROP_COPIES' value='32'/>
<enumerator name='ZFS_PROP_VERSION' value='33'/>
<enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
<enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
<enumerator name='ZFS_PROP_CASE' value='36'/>
<enumerator name='ZFS_PROP_VSCAN' value='37'/>
<enumerator name='ZFS_PROP_NBMAND' value='38'/>
<enumerator name='ZFS_PROP_SHARESMB' value='39'/>
<enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
<enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
<enumerator name='ZFS_PROP_GUID' value='42'/>
<enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
<enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
<enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
<enumerator name='ZFS_PROP_USEDDS' value='46'/>
<enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
<enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
<enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
<enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
<enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
<enumerator name='ZFS_PROP_USERREFS' value='52'/>
<enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
<enumerator name='ZFS_PROP_UNIQUE' value='54'/>
<enumerator name='ZFS_PROP_OBJSETID' value='55'/>
<enumerator name='ZFS_PROP_DEDUP' value='56'/>
<enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
<enumerator name='ZFS_PROP_SYNC' value='58'/>
<enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
<enumerator name='ZFS_PROP_REFRATIO' value='60'/>
<enumerator name='ZFS_PROP_WRITTEN' value='61'/>
<enumerator name='ZFS_PROP_CLONES' value='62'/>
<enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
<enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
<enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
<enumerator name='ZFS_PROP_VOLMODE' value='66'/>
<enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
<enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
<enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
<enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
<enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
<enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
<enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
<enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
<enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
<enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
<enumerator name='ZFS_PROP_RELATIME' value='77'/>
<enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
<enumerator name='ZFS_PROP_OVERLAY' value='79'/>
<enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
<enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
<enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
<enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
<enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
<enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
<enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
<enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
<enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
<enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
<enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
<enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
<enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
<enumerator name='ZFS_PROP_REDACTED' value='93'/>
<enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
<enumerator name='ZFS_PROP_SNAPSHOTS_CHANGED' value='95'/>
<enumerator name='ZFS_NUM_PROPS' value='96'/>
</enum-decl>
<typedef-decl name='zfs_prop_t' type-id='4b000d60' id='58603c44'/>
<enum-decl name='zprop_source_t' naming-typedef-id='a2256d42' id='5903f80e'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_SRC_NONE' value='1'/>
<enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
<enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
<enumerator name='ZPROP_SRC_LOCAL' value='8'/>
<enumerator name='ZPROP_SRC_INHERITED' value='16'/>
<enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
</enum-decl>
<typedef-decl name='zprop_source_t' type-id='5903f80e' id='a2256d42'/>
<class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
<enum-decl name='sa_protocol' id='9155d4b5'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SA_PROTOCOL_NFS' value='0'/>
<enumerator name='SA_PROTOCOL_SMB' value='1'/>
<enumerator name='SA_PROTOCOL_COUNT' value='2'/>
</enum-decl>
<enum-decl name='boolean_t' naming-typedef-id='c19b74c3' id='f58c8277'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
<typedef-decl name='boolean_t' type-id='f58c8277' id='c19b74c3'/>
<typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
<typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
<typedef-decl name='longlong_t' type-id='1eb56b1e' id='9b3ff54f'/>
<typedef-decl name='diskaddr_t' type-id='9b3ff54f' id='804dc465'/>
<typedef-decl name='zoneid_t' type-id='3502e3ff' id='4da03624'/>
<typedef-decl name='__re_long_size_t' type-id='7359adad' id='ba516949'/>
<typedef-decl name='reg_syntax_t' type-id='7359adad' id='1b72c3b3'/>
<class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='19fc9a8c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='buffer' type-id='33976309' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='allocated' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='used' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='syntax' type-id='1b72c3b3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fastmap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='translate' type-id='cf536864' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='re_nsub' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='can_be_null' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='449'>
<var-decl name='regs_allocated' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='451'>
<var-decl name='fastmap_accurate' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='452'>
<var-decl name='no_sub' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='453'>
<var-decl name='not_bol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='454'>
<var-decl name='not_eol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='455'>
<var-decl name='newline_anchor' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='regex_t' type-id='19fc9a8c' id='aca3bac8'/>
<typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
<union-decl name='pthread_mutex_t' size-in-bits='320' naming-typedef-id='7a6844eb' visibility='default' id='70681f9b'>
<data-member access='public'>
<var-decl name='__data' type-id='4c734837' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36c46961' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutex_t' type-id='70681f9b' id='7a6844eb'/>
<typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
<typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
<typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
<typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
<class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='__spins' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
<var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
<typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
<typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
<typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
<typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
<typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
<class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='c8a9d9d8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='libzfs_error' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='libzfs_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='libzfs_pool_handles' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='libzfs_ns_avlpool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='libzfs_ns_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='libzfs_ns_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='libzfs_desc_active' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='libzfs_action' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8544'>
<var-decl name='libzfs_desc' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16736'>
<var-decl name='libzfs_printerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16768'>
<var-decl name='libzfs_mnttab_enable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16832'>
<var-decl name='libzfs_mnttab_cache_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17152'>
<var-decl name='libzfs_mnttab_cache' type-id='f20fbd51' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17472'>
<var-decl name='libzfs_pool_iter' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17504'>
<var-decl name='libzfs_prop_debug' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17536'>
<var-decl name='libzfs_urire' type-id='aca3bac8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18048'>
<var-decl name='libzfs_max_nvlist' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18112'>
<var-decl name='libfetch' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='18176'>
<var-decl name='libfetch_load_error' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='f6ee4445'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zfs_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_hdl' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zfs_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zfs_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2208'>
<var-decl name='zfs_head_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zfs_dmustats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4544'>
<var-decl name='zfs_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4608'>
<var-decl name='zfs_user_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4672'>
<var-decl name='zfs_recvd_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4736'>
<var-decl name='zfs_mntcheck' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4800'>
<var-decl name='zfs_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4864'>
<var-decl name='zfs_props_table' type-id='ae3e8ca6' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='67002a8a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zpool_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_next' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zpool_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zpool_state' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2496'>
<var-decl name='zpool_start_block' type-id='804dc465' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
<pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
<pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
<pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
<qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
<pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
<qualified-type-def type-id='9155d4b5' const='yes' id='9f2c1699'/>
<pointer-type-def type-id='9f2c1699' size-in-bits='64' id='4567bbc9'/>
<qualified-type-def type-id='775509eb' const='yes' id='5eadf2db'/>
<pointer-type-def type-id='5eadf2db' size-in-bits='64' id='fcd57163'/>
<pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
<pointer-type-def type-id='cb9628fa' size-in-bits='64' id='5571cde4'/>
<pointer-type-def type-id='95942d0c' size-in-bits='64' id='b0382bb3'/>
<pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
<pointer-type-def type-id='b48d2441' size-in-bits='64' id='33976309'/>
<pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
<pointer-type-def type-id='002ac4a6' size-in-bits='64' id='cf536864'/>
<pointer-type-def type-id='5d7f5fc8' size-in-bits='64' id='813a2225'/>
<pointer-type-def type-id='73a65116' size-in-bits='64' id='2dc35b9d'/>
<pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
<pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
<pointer-type-def type-id='edd8457b' size-in-bits='64' id='5842d146'/>
<pointer-type-def type-id='40f93560' size-in-bits='64' id='d502b39f'/>
<pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
<pointer-type-def type-id='775509eb' size-in-bits='64' id='9200a744'/>
<pointer-type-def type-id='b1efc708' size-in-bits='64' id='4c81de99'/>
<pointer-type-def type-id='a2256d42' size-in-bits='64' id='debc6aa3'/>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='e70a39e3'/>
<function-decl name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='d502b39f'/>
<parameter type-id='8f92235e'/>
<return type-id='de82c773'/>
</function-decl>
<function-decl name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='de82c773'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='2dc35b9d'/>
<parameter type-id='de82c773'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='de82c773'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='8f92235e'/>
<return type-id='a5c21a38'/>
</function-decl>
<function-decl name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_last' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='8f92235e'/>
<return type-id='5842d146'/>
</function-decl>
<function-decl name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5842d146'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5842d146'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_find' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='813a2225'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5d7f5fc8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
<parameter type-id='9200a744'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
<parameter type-id='fcd57163'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
<parameter type-id='9200a744'/>
<parameter type-id='58603c44'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='debc6aa3'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
<parameter type-id='9200a744'/>
<parameter type-id='58603c44'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_iter_children_v2' mangled-name='zfs_iter_children_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children_v2'>
<parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_dependents_v2' mangled-name='zfs_iter_dependents_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents_v2'>
<parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
<parameter type-id='9200a744'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
<parameter type-id='9200a744'/>
<parameter type-id='9b23c9ad'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
<parameter type-id='9200a744'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='4567bbc9'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
<parameter type-id='9200a744'/>
<parameter type-id='4567bbc9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='4567bbc9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
<parameter type-id='4567bbc9'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
<return type-id='4da03624'/>
</function-decl>
<function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
<parameter type-id='9155d4b5'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strcmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strncmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strlen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='zfs_error' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_alloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='remove_mountpoint' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='96ee24a5'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='add6e811'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='cb9628fa'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_config.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
<subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='65536' id='163f6aa5'>
<subrange length='8192' type-id='7359adad' id='c88f397d'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='infinite' id='e84913bd'>
<subrange length='infinite' type-id='7359adad' id='031f2035'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='24' id='d3490169'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<type-decl name='variadic parameter type' id='2c1145c5'/>
<typedef-decl name='zpool_iter_f' type-id='3aebb66f' id='fa476e62'/>
<enum-decl name='data_type_t' naming-typedef-id='8d0687d2' id='aeeae136'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
<enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
<enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
<enumerator name='DATA_TYPE_BYTE' value='2'/>
<enumerator name='DATA_TYPE_INT16' value='3'/>
<enumerator name='DATA_TYPE_UINT16' value='4'/>
<enumerator name='DATA_TYPE_INT32' value='5'/>
<enumerator name='DATA_TYPE_UINT32' value='6'/>
<enumerator name='DATA_TYPE_INT64' value='7'/>
<enumerator name='DATA_TYPE_UINT64' value='8'/>
<enumerator name='DATA_TYPE_STRING' value='9'/>
<enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
<enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
<enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
<enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
<enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
<enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
<enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
<enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
<enumerator name='DATA_TYPE_HRTIME' value='18'/>
<enumerator name='DATA_TYPE_NVLIST' value='19'/>
<enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
<enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
<enumerator name='DATA_TYPE_INT8' value='22'/>
<enumerator name='DATA_TYPE_UINT8' value='23'/>
<enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
<enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
<enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
<enumerator name='DATA_TYPE_DOUBLE' value='27'/>
</enum-decl>
<typedef-decl name='data_type_t' type-id='aeeae136' id='8d0687d2'/>
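<!-- data_type_t is the value-type tag carried by every nvpair in
     libnvpair; these numeric values are presumably frozen, since they
     are encoded into serialized nvlists. -->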
<class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='1c34e459'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='nvp_size' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='nvp_name_sz' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='nvp_reserve' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='nvp_value_elem' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='nvp_type' type-id='8d0687d2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='nvp_name' type-id='e84913bd' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nvpair_t' type-id='1c34e459' id='57928edf'/>
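<!-- nvpair is a packed header followed by its name in-line: nvp_name
     has array type e84913bd, whose size-in-bits='infinite' above is how
     libabigail renders a C flexible array member. -->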
<class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='09fcdc01'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_magic' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_versioninfo' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_creation_time' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='drr_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_fromguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_toname' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='3216f820'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zi_objset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zi_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zi_start' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zi_end' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='zi_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='zi_level' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='zi_error' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='zi_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='zi_freq' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='zi_failfast' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='zi_func' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2560'>
<var-decl name='zi_iotype' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2592'>
<var-decl name='zi_duration' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2624'>
<var-decl name='zi_timer' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2688'>
<var-decl name='zi_nlanes' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2752'>
<var-decl name='zi_cmd' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2784'>
<var-decl name='zi_dvas' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zinject_record_t' type-id='3216f820' id='a4301ca6'/>
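<!-- zinject_record_t appears to carry the fault-injection parameters
     used by zinject(8); the 2048-bit zi_func field (a 256-byte function
     name buffer, type d1617432) accounts for most of its size. -->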
<class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='feb6f2da'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='z_exportdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_sharedata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='z_sharetype' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='z_sharemax' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_share_t' type-id='feb6f2da' id='ee5cec36'/>
<class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='3522cd69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_name' type-id='d16c6df4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32768'>
<var-decl name='zc_nvlist_src' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32832'>
<var-decl name='zc_nvlist_src_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32896'>
<var-decl name='zc_nvlist_dst' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32960'>
<var-decl name='zc_nvlist_dst_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33024'>
<var-decl name='zc_nvlist_dst_filled' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33056'>
<var-decl name='zc_pad2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33088'>
<var-decl name='zc_history' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33152'>
<var-decl name='zc_value' type-id='163f6aa5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='98688'>
<var-decl name='zc_string' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100736'>
<var-decl name='zc_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100800'>
<var-decl name='zc_nvlist_conf' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100864'>
<var-decl name='zc_nvlist_conf_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100928'>
<var-decl name='zc_cookie' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100992'>
<var-decl name='zc_objset_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101056'>
<var-decl name='zc_perm_action' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101120'>
<var-decl name='zc_history_len' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101184'>
<var-decl name='zc_history_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101248'>
<var-decl name='zc_obj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101312'>
<var-decl name='zc_iflags' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101376'>
<var-decl name='zc_share' type-id='ee5cec36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101632'>
<var-decl name='zc_objset_stats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='103936'>
<var-decl name='zc_begin_record' type-id='09fcdc01' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='106368'>
<var-decl name='zc_inject_record' type-id='a4301ca6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109184'>
<var-decl name='zc_defer_destroy' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109216'>
<var-decl name='zc_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109248'>
<var-decl name='zc_action_handle' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109312'>
<var-decl name='zc_cleanup_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109344'>
<var-decl name='zc_simple' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109352'>
<var-decl name='zc_pad' type-id='d3490169' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109376'>
<var-decl name='zc_sendobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109440'>
<var-decl name='zc_fromobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109504'>
<var-decl name='zc_createtxg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109568'>
<var-decl name='zc_stat' type-id='0371a9c7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109888'>
<var-decl name='zc_zoneid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_cmd_t' type-id='3522cd69' id='a5559cdd'/>
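<!-- zfs_cmd is the argument block passed over the /dev/zfs ioctl
     boundary. Its 109952-bit (13744-byte) footprint is dominated by the
     in-line zc_name (4096 chars, d16c6df4) and zc_value (8192 chars,
     163f6aa5) buffers defined at the top of this unit. -->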
<class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='6417f0b9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zs_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zs_mode' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zs_links' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zs_ctime' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_stat_t' type-id='6417f0b9' id='0371a9c7'/>
<typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
<typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
<pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
<qualified-type-def type-id='8e8d4be3' const='yes' id='693c3853'/>
<pointer-type-def type-id='693c3853' size-in-bits='64' id='22cce67b'/>
<qualified-type-def type-id='57928edf' const='yes' id='642ee20f'/>
<pointer-type-def type-id='642ee20f' size-in-bits='64' id='dace003f'/>
<pointer-type-def type-id='2bce87e3' size-in-bits='64' id='3aebb66f'/>
<pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
<pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
<pointer-type-def type-id='57928edf' size-in-bits='64' id='3fa542f0'/>
<pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
<pointer-type-def type-id='3522cd69' size-in-bits='64' id='b65f7fd1'/>
<pointer-type-def type-id='a5559cdd' size-in-bits='64' id='e4ec4540'/>
<pointer-type-def type-id='4c81de99' size-in-bits='64' id='237193c9'/>
<function-decl name='uu_avl_first' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='63e171df'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='b65f7fd1'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='857bb57e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='dace003f'/>
<return type-id='3fa542f0'/>
</function-decl>
<function-decl name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
<return type-id='7292109c'/>
</function-decl>
<function-decl name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='getenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strchr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='oldconfig'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='37e3bd22' name='missing'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
<parameter type-id='80f4b756' name='poolname'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='fa476e62' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strdup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='no_memory' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_alloc_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zcmd_expand_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zcmd_read_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_free_nvlists' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e4ec4540'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='make_dataset_handle' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zpool_open_silent' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='237193c9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='2bce87e3'>
<parameter type-id='4c81de99'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
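<!-- The libzfs_crypto.c unit below pulls in termios, sigaction and
     stdio types because interactive key loading reads passphrases from
     a TTY (presumably via the tcgetattr/tcsetattr declarations further
     down, to suppress echo), and it references OpenSSL's
     PKCS5_PBKDF2_HMAC_SHA1 to derive wrapping keys from those
     passphrases. -->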
<abi-instr address-size='64' path='lib/libzfs/libzfs_crypto.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='fb7c6451' size-in-bits='256' id='64177143'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
<subrange length='20' type-id='7359adad' id='fdca39cf'/>
</array-type-def>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='896' id='47394ee0'>
<subrange length='28' type-id='7359adad' id='3db583d7'/>
</array-type-def>
<type-decl name='signed char' size-in-bits='8' id='28577a57'/>
<array-type-def dimensions='1' type-id='7359adad' size-in-bits='1024' id='d2baa450'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
<enum-decl name='zpool_prop_t' naming-typedef-id='5d0c23fb' id='af1ba157'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
<enumerator name='ZPOOL_PROP_NAME' value='0'/>
<enumerator name='ZPOOL_PROP_SIZE' value='1'/>
<enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
<enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
<enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
<enumerator name='ZPOOL_PROP_GUID' value='5'/>
<enumerator name='ZPOOL_PROP_VERSION' value='6'/>
<enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
<enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
<enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
<enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
<enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
<enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
<enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
<enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
<enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
<enumerator name='ZPOOL_PROP_FREE' value='16'/>
<enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
<enumerator name='ZPOOL_PROP_READONLY' value='18'/>
<enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
<enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
<enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
<enumerator name='ZPOOL_PROP_FREEING' value='22'/>
<enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
<enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
<enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
<enumerator name='ZPOOL_PROP_TNAME' value='26'/>
<enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
<enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
<enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
<enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
<enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
<enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
<enumerator name='ZPOOL_PROP_BCLONEUSED' value='33'/>
<enumerator name='ZPOOL_PROP_BCLONESAVED' value='34'/>
<enumerator name='ZPOOL_PROP_BCLONERATIO' value='35'/>
<enumerator name='ZPOOL_NUM_PROPS' value='36'/>
</enum-decl>
<typedef-decl name='zpool_prop_t' type-id='af1ba157' id='5d0c23fb'/>
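<!-- zpool_prop_t appears to grow append-only: newer properties such as
     the ZPOOL_PROP_BCLONE* trio are inserted just before
     ZPOOL_NUM_PROPS so existing numeric values, and with them the ABI,
     stay stable. -->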
<typedef-decl name='regoff_t' type-id='95e97e5e' id='54a2a2a8'/>
<class-decl name='regmatch_t' size-in-bits='64' is-struct='yes' naming-typedef-id='1b941664' visibility='default' id='4f932615'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='rm_so' type-id='54a2a2a8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='rm_eo' type-id='54a2a2a8' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='regmatch_t' type-id='4f932615' id='1b941664'/>
<typedef-decl name='__sighandler_t' type-id='03347643' id='8cdd9566'/>
<typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
<class-decl name='sigaction' size-in-bits='1216' is-struct='yes' visibility='default' id='fe391c48'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__sigaction_handler' type-id='ac5ab595' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sa_mask' type-id='b9c97942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='sa_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='sa_restorer' type-id='953b12f8' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='ac5ab595'>
<data-member access='public'>
<var-decl name='sa_handler' type-id='8cdd9566' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sa_sigaction' type-id='6e756877' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='termios' size-in-bits='480' is-struct='yes' visibility='default' id='ad55d2bc'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='c_iflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='c_oflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='c_cflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='c_lflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='c_line' type-id='fb7c6451' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='136'>
<var-decl name='c_cc' type-id='64177143' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='c_ispeed' type-id='6a8e8a14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='c_ospeed' type-id='6a8e8a14' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='cc_t' type-id='002ac4a6' id='fb7c6451'/>
<typedef-decl name='speed_t' type-id='f0981eeb' id='6a8e8a14'/>
<typedef-decl name='tcflag_t' type-id='f0981eeb' id='241ce6f8'/>
<typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
<typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
<typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
<typedef-decl name='__pid_t' type-id='95e97e5e' id='3629bad8'/>
<typedef-decl name='__clock_t' type-id='bd54fe1a' id='4d66c6d7'/>
<typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
<typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
<class-decl name='__sigset_t' size-in-bits='1024' is-struct='yes' naming-typedef-id='b9c97942' visibility='default' id='2616147f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__val' type-id='d2baa450' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__sigset_t' type-id='2616147f' id='b9c97942'/>
<union-decl name='sigval' size-in-bits='64' visibility='default' id='a094b870'>
<data-member access='public'>
<var-decl name='sival_int' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sival_ptr' type-id='eaa32e2f' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='__sigval_t' type-id='a094b870' id='eabacd01'/>
<class-decl name='siginfo_t' size-in-bits='1024' is-struct='yes' naming-typedef-id='cb681f62' visibility='default' id='d8149419'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_signo' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_errno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_code' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_sifields' type-id='ac5ab596' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__1' size-in-bits='896' is-anonymous='yes' visibility='default' id='ac5ab596'>
<data-member access='public'>
<var-decl name='_pad' type-id='47394ee0' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_kill' type-id='e7f43f73' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_timer' type-id='e7f43f74' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_rt' type-id='e7f43f75' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigchld' type-id='e7f43f76' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigfault' type-id='e7f43f77' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigpoll' type-id='e7f43f78' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigsys' type-id='e7f43f79' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__1' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f73'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__2' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f74'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_tid' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_overrun' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_sigval' type-id='eabacd01' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__3' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f75'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_sigval' type-id='eabacd01' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__4' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f76'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_status' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='si_utime' type-id='4d66c6d7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='si_stime' type-id='4d66c6d7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__5' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f77'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_addr' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_addr_lsb' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_bounds' type-id='ac5ab597' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__2' size-in-bits='128' is-anonymous='yes' visibility='default' id='ac5ab597'>
<data-member access='public'>
<var-decl name='_addr_bnd' type-id='e7f43f7a' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_pkey' type-id='62f1140c' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__6' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f7a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_lower' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_upper' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__7' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f78'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_band' type-id='bd54fe1a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__8' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f79'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_call_addr' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_syscall' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='_arch' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='siginfo_t' type-id='d8149419' id='cb681f62'/>
<typedef-decl name='sigset_t' type-id='b9c97942' id='daf33c64'/>
<typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
<class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
<var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
<var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
<var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='_codecvt' type-id='570f8c59' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
<var-decl name='_wide_data' type-id='c65a1f29' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
<var-decl name='_freeres_list' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
<var-decl name='_freeres_buf' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
<var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
<var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
<var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
<qualified-type-def type-id='822cd80b' restrict='yes' id='e75a27e9'/>
<pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
<pointer-type-def type-id='a4036571' size-in-bits='64' id='570f8c59'/>
<pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
<pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
<pointer-type-def type-id='79bd3751' size-in-bits='64' id='c65a1f29'/>
<qualified-type-def type-id='9b23c9ad' restrict='yes' id='8c85230f'/>
<qualified-type-def type-id='80f4b756' restrict='yes' id='9d26089a'/>
<pointer-type-def type-id='80f4b756' size-in-bits='64' id='7d3cd834'/>
<qualified-type-def type-id='aca3bac8' const='yes' id='2498fd78'/>
<pointer-type-def type-id='2498fd78' size-in-bits='64' id='eed6c816'/>
<qualified-type-def type-id='eed6c816' restrict='yes' id='a431a9da'/>
<qualified-type-def type-id='fe391c48' const='yes' id='14a93b33'/>
<pointer-type-def type-id='14a93b33' size-in-bits='64' id='9f68085b'/>
<qualified-type-def type-id='9f68085b' restrict='yes' id='e2a5e6f9'/>
<qualified-type-def type-id='ad55d2bc' const='yes' id='a46bf13f'/>
<pointer-type-def type-id='a46bf13f' size-in-bits='64' id='eaec840f'/>
<qualified-type-def type-id='002ac4a6' const='yes' id='ea86de29'/>
<pointer-type-def type-id='ea86de29' size-in-bits='64' id='354f7eb9'/>
<qualified-type-def type-id='8efea9e5' const='yes' id='3beb2af4'/>
<pointer-type-def type-id='3beb2af4' size-in-bits='64' id='31347b7a'/>
<pointer-type-def type-id='31347b7a' size-in-bits='64' id='c59e1ef0'/>
<pointer-type-def type-id='1b941664' size-in-bits='64' id='7e2979d5'/>
<qualified-type-def type-id='7e2979d5' restrict='yes' id='fc212857'/>
<pointer-type-def type-id='fe391c48' size-in-bits='64' id='568dd84e'/>
<qualified-type-def type-id='568dd84e' restrict='yes' id='3d8ee6f2'/>
<pointer-type-def type-id='cb681f62' size-in-bits='64' id='185869c1'/>
<pointer-type-def type-id='daf33c64' size-in-bits='64' id='9e80f729'/>
<pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
<qualified-type-def type-id='78c01427' restrict='yes' id='d19b2c25'/>
<pointer-type-def type-id='ad55d2bc' size-in-bits='64' id='665a4eda'/>
<pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
<pointer-type-def type-id='ae3e8ca6' size-in-bits='64' id='d8774064'/>
<pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
<pointer-type-def type-id='ee076206' size-in-bits='64' id='953b12f8'/>
<pointer-type-def type-id='f712e2b7' size-in-bits='64' id='03347643'/>
<pointer-type-def type-id='ef70d893' size-in-bits='64' id='6e756877'/>
<qualified-type-def type-id='eaa32e2f' restrict='yes' id='1b7446cd'/>
<function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
<parameter type-id='4c81de99'/>
<parameter type-id='5d0c23fb'/>
<parameter type-id='debc6aa3'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
<parameter type-id='9200a744'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9200a744'/>
<parameter type-id='4c81de99'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='80f4b756'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
<parameter type-id='58603c44'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_iter_filesystems_v2' mangled-name='zfs_iter_filesystems_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems_v2'>
<parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
<parameter type-id='9200a744'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_load_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_change_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
<parameter type-id='80f4b756'/>
<return type-id='58603c44'/>
</function-decl>
<function-decl name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='c59e1ef0'/>
</function-decl>
<function-decl name='dlopen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='dlsym' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='1b7446cd'/>
<parameter type-id='9d26089a'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='dlerror' visibility='default' binding='global' size-in-bits='64'>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='PKCS5_PBKDF2_HMAC_SHA1' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='354f7eb9'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='cf536864'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='regexec' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a431a9da'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='fc212857'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='kill' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3629bad8'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sigemptyset' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9e80f729'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sigaction' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='e2a5e6f9'/>
<parameter type-id='3d8ee6f2'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fclose' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fflush' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fdopen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='822cd80b'/>
</function-decl>
<function-decl name='fputc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__getdelim' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='d19b2c25'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='e75a27e9'/>
<return type-id='41060289'/>
</function-decl>
<function-decl name='rewind' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='ferror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fileno' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='malloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='calloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='strdup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strerror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='tcgetattr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='665a4eda'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tcsetattr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaec840f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='close' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getpid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='3629bad8'/>
</function-decl>
<function-decl name='isatty' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='unlink' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__open_too_many_args' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__open_missing_mode' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='9d26089a'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__fread_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='1b7446cd'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='e75a27e9'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='__read_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='37e3bd22' name='is_encroot'/>
<parameter type-id='26a90f95' name='buf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='pool_props'/>
<parameter type-id='c19b74c3' name='stdin_available'/>
<parameter type-id='d8774064' name='wkeydata_out'/>
<parameter type-id='4dd26a40' name='wkeylen_out'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9200a744' name='origin_zhp'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='noop'/>
<parameter type-id='80f4b756' name='alt_keylocation'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='raw_props'/>
<parameter type-id='c19b74c3' name='inheritkey'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_error_aux' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='f712e2b7'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='ef70d893'>
<parameter type-id='95e97e5e'/>
<parameter type-id='185869c1'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
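<!-- The libzfs_dataset.c unit below opens with the helper types behind
     property listing (zprop_list_t), rename (renameflags_t) and the
     userspace/quota iteration callback (zfs_userspace_cb_t). -->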
<abi-instr address-size='64' path='lib/libzfs/libzfs_dataset.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32' id='8e0573fd'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='prop_changelist' is-struct='yes' visibility='default' is-declaration-only='yes' id='d86edc51'/>
<class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='bd9b4291'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pl_prop' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pl_user_prop' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pl_next' type-id='9f1a1109' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pl_all' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pl_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pl_recvd_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pl_fixed' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_list_t' type-id='bd9b4291' id='bdb8ac4f'/>
<class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='7aee5792'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='recursive' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='nounmount' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2'>
<var-decl name='forceunmount' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='renameflags_t' type-id='7aee5792' id='067170c2'/>
<typedef-decl name='zfs_userspace_cb_t' type-id='ca64ff60' id='16c5f410'/>
<enum-decl name='lzc_dataset_type' id='bc9887f1'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
<enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
</enum-decl>
<typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
<enum-decl name='zfs_userquota_prop_t' naming-typedef-id='279fde6a' id='5258d2f6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_PROP_USERUSED' value='0'/>
<enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
<enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
<enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
<enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
<enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
<enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
<enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
<enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
<enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
<enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
<enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
<enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
</enum-decl>
<typedef-decl name='zfs_userquota_prop_t' type-id='5258d2f6' id='279fde6a'/>
<enum-decl name='zfs_wait_activity_t' naming-typedef-id='3024501a' id='527d5dc6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
<enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
<typedef-decl name='zfs_wait_activity_t' type-id='527d5dc6' id='3024501a'/>
<enum-decl name='namecheck_err_t' naming-typedef-id='8e0af06e' id='f43bbcda'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
<enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
<enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
<enumerator name='NAME_ERR_INVALCHAR' value='3'/>
<enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
<enumerator name='NAME_ERR_NOLETTER' value='5'/>
<enumerator name='NAME_ERR_RESERVED' value='6'/>
<enumerator name='NAME_ERR_DISKLIKE' value='7'/>
<enumerator name='NAME_ERR_TOOLONG' value='8'/>
<enumerator name='NAME_ERR_SELF_REF' value='9'/>
<enumerator name='NAME_ERR_PARENT_REF' value='10'/>
<enumerator name='NAME_ERR_NO_AT' value='11'/>
<enumerator name='NAME_ERR_NO_POUND' value='12'/>
</enum-decl>
<typedef-decl name='namecheck_err_t' type-id='f43bbcda' id='8e0af06e'/>
<enum-decl name='zprop_type_t' naming-typedef-id='31429eff' id='87676253'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_TYPE_NUMBER' value='0'/>
<enumerator name='PROP_TYPE_STRING' value='1'/>
<enumerator name='PROP_TYPE_INDEX' value='2'/>
</enum-decl>
<typedef-decl name='zprop_type_t' type-id='87676253' id='31429eff'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='group' size-in-bits='256' is-struct='yes' visibility='default' id='01a1b934'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='gr_name' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='gr_passwd' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='gr_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='gr_mem' type-id='9b23c9ad' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_dir' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_type' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_opts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_freq' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_passno' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='passwd' size-in-bits='384' is-struct='yes' visibility='default' id='a63d15a3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pw_name' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pw_passwd' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pw_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='pw_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pw_gecos' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pw_dir' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pw_shell' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='pthread_mutexattr_t' size-in-bits='32' naming-typedef-id='8afd6070' visibility='default' id='7300eb00'>
<data-member access='public'>
<var-decl name='__size' type-id='8e0573fd' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='95e97e5e' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutexattr_t' type-id='7300eb00' id='8afd6070'/>
<typedef-decl name='int64_t' type-id='0c9942d2' id='9da381c4'/>
<typedef-decl name='__int64_t' type-id='bd54fe1a' id='0c9942d2'/>
<typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
<typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
<class-decl name='tm' size-in-bits='448' is-struct='yes' visibility='default' id='dddf6ca2'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tm_sec' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='tm_min' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tm_hour' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='tm_mday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tm_mon' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='tm_year' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='tm_wday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='tm_yday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='tm_isdst' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='tm_gmtoff' type-id='bd54fe1a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='tm_zone' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='time_t' type-id='65eda9c0' id='c9d12d66'/>
<typedef-decl name='uid_t' type-id='cc5fcceb' id='354978ed'/>
<typedef-decl name='prop_changelist_t' type-id='d86edc51' id='eae6431d'/>
<pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
<pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
<qualified-type-def type-id='26a90f95' restrict='yes' id='266fe297'/>
<qualified-type-def type-id='56fe4a37' const='yes' id='a75125ce'/>
<pointer-type-def type-id='a75125ce' size-in-bits='64' id='48bea5ec'/>
<qualified-type-def type-id='8afd6070' const='yes' id='1d853360'/>
<pointer-type-def type-id='1d853360' size-in-bits='64' id='c2afbd7e'/>
<qualified-type-def type-id='c9d12d66' const='yes' id='588b3216'/>
<pointer-type-def type-id='588b3216' size-in-bits='64' id='9f201474'/>
<qualified-type-def type-id='9f201474' restrict='yes' id='d6e2847c'/>
<qualified-type-def type-id='dddf6ca2' const='yes' id='e824a34f'/>
<pointer-type-def type-id='e824a34f' size-in-bits='64' id='d6ad37ff'/>
<qualified-type-def type-id='d6ad37ff' restrict='yes' id='f8c6051d'/>
<qualified-type-def type-id='9c313c2d' const='yes' id='c3b7ba7d'/>
<pointer-type-def type-id='c3b7ba7d' size-in-bits='64' id='713a56f5'/>
<pointer-type-def type-id='01a1b934' size-in-bits='64' id='566b3f52'/>
<pointer-type-def type-id='7e291ce6' size-in-bits='64' id='ca64ff60'/>
<pointer-type-def type-id='9da381c4' size-in-bits='64' id='cb785ebf'/>
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='8e0af06e' size-in-bits='64' id='053457bd'/>
<pointer-type-def type-id='857bb57e' size-in-bits='64' id='75be733c'/>
<pointer-type-def type-id='a63d15a3' size-in-bits='64' id='a195f4a3'/>
<pointer-type-def type-id='eae6431d' size-in-bits='64' id='0d41d328'/>
<pointer-type-def type-id='7a6844eb' size-in-bits='64' id='18c91f9e'/>
<pointer-type-def type-id='dddf6ca2' size-in-bits='64' id='d915a820'/>
<qualified-type-def type-id='d915a820' restrict='yes' id='f099ad08'/>
<pointer-type-def type-id='5d6479ae' size-in-bits='64' id='892b4acc'/>
<pointer-type-def type-id='bd9b4291' size-in-bits='64' id='9f1a1109'/>
<pointer-type-def type-id='bdb8ac4f' size-in-bits='64' id='3a9b2288'/>
<pointer-type-def type-id='3a9b2288' size-in-bits='64' id='e4378506'/>
<class-decl name='prop_changelist' is-struct='yes' visibility='default' is-declaration-only='yes' id='d86edc51'/>
<function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
<parameter type-id='4c81de99'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
<parameter type-id='4c81de99'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
<parameter type-id='4c81de99'/>
<parameter type-id='5d0c23fb'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='debc6aa3'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
<parameter type-id='58603c44'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
<parameter type-id='58603c44'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
<parameter type-id='4c81de99'/>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_v2' mangled-name='zfs_iter_snapshots_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_v2'>
<parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_bookmarks_v2' mangled-name='zfs_iter_bookmarks_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks_v2'>
<parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps_nvl_os' mangled-name='zfs_destroy_snaps_nvl_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl_os'>
<parameter type-id='b0382bb3'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='bc9887f1'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_clone' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_promote' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_hold' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_release' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_exists' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='3024501a'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
<parameter type-id='9c313c2d'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
<parameter type-id='9c313c2d'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
<parameter type-id='a3681dea'/>
<parameter type-id='585e1de9'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='32adbf30'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
<parameter type-id='a3681dea'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
<parameter type-id='a3681dea'/>
<parameter type-id='63e171df'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
<parameter type-id='a3681dea'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
<parameter type-id='58603c44'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='857bb57e'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='78c01427'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='78c01427'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='857bb57e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='22cce67b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='713a56f5'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='8d0687d2'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<parameter type-id='cb785ebf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='892b4acc'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='75be733c'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='8d0687d2'/>
</function-decl>
<function-decl name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='3ff5601b'/>
</function-decl>
<function-decl name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
<parameter type-id='58603c44'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
<parameter type-id='80f4b756'/>
<parameter type-id='9155d4b5'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
<parameter type-id='822cd80b'/>
<parameter type-id='9d424d31'/>
<parameter type-id='9d424d31'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
<parameter type-id='822cd80b'/>
<parameter type-id='9d424d31'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getgrnam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='566b3f52'/>
</function-decl>
<function-decl name='hasmntopt' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='48bea5ec'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<parameter type-id='c2afbd7e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getpwnam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='a195f4a3'/>
</function-decl>
<function-decl name='strtol' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='bd54fe1a'/>
</function-decl>
<function-decl name='strtoul' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='7359adad'/>
</function-decl>
<function-decl name='abort' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strrchr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strcspn' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='strstr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strsep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='9d26089a'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strftime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='9d26089a'/>
<parameter type-id='f8c6051d'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='localtime_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='d6e2847c'/>
<parameter type-id='f099ad08'/>
<return type-id='d915a820'/>
</function-decl>
<function-decl name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='9d26089a'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='ioctl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='7359adad'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='enable'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<parameter type-id='9d424d31' name='entry'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='special'/>
<parameter type-id='80f4b756' name='mountp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='7292109c' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set_list_flags' mangled-name='zfs_prop_set_list_flags' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list_flags'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='c19b74c3' name='received'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='7d3cd834' name='source'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='b59d7dce' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='5d6479ae' name='value'/>
<parameter type-id='debc6aa3' name='src'/>
<parameter type-id='26a90f95' name='statbuf'/>
<parameter type-id='b59d7dce' name='statlen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='2e45de5d'/>
</function-decl>
<function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='2e45de5d'/>
</function-decl>
<function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='types'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='26a90f95' name='snapname'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9200a744' name='snap'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='067170c2' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='c19b74c3' name='received'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='ae3e8ca6' name='props'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='oldname'/>
<parameter type-id='26a90f95' name='newname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='279fde6a' name='type'/>
<parameter type-id='16c5f410' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<parameter type-id='5ce45b60' name='holds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='un'/>
<parameter type-id='5ce45b60' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
<parameter type-id='4c81de99' name='zph'/>
<parameter type-id='9c313c2d' name='volsize'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='3024501a' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_error_fmt' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_setprop_error' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='58603c44'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='26a90f95'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_parse_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='3fa542f0'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='7d3cd834'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_expand_list' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4378506'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_write_src_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_prefix' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='changelist_postfix' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='changelist_rename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_gather' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<parameter type-id='58603c44'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='0d41d328'/>
</function-decl>
<function-decl name='changelist_haszonedchild' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_name_valid' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-type size-in-bits='64' id='7e291ce6'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='354978ed'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_diff.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='448' id='6093ff7c'>
<subrange length='56' type-id='7359adad' id='f8137894'/>
</array-type-def>
<typedef-decl name='pthread_t' type-id='7359adad' id='4051f5e7'/>
<union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='b63afacd'>
<data-member access='public'>
<var-decl name='__size' type-id='6093ff7c' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_attr_t' type-id='b63afacd' id='7d8569fd'/>
<class-decl name='differ_info' size-in-bits='9088' is-struct='yes' visibility='default' id='d41965ee'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zhp' type-id='9200a744' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fromsnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='frommnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='tosnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='tomnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='ds' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='dsmnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='tmpsnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='errbuf' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8704'>
<var-decl name='isclone' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8736'>
<var-decl name='scripted' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8768'>
<var-decl name='classify' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8800'>
<var-decl name='timestamped' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8832'>
<var-decl name='no_mangle' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8896'>
<var-decl name='shares' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8960'>
<var-decl name='zerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8992'>
<var-decl name='cleanupfd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='9024'>
<var-decl name='outputfd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='9056'>
<var-decl name='datafd' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='differ_info_t' type-id='d41965ee' id='e8525f0e'/>
<qualified-type-def type-id='7d8569fd' const='yes' id='e06dee2d'/>
<pointer-type-def type-id='e06dee2d' size-in-bits='64' id='540db505'/>
<qualified-type-def type-id='540db505' restrict='yes' id='e1815e87'/>
<pointer-type-def type-id='e8525f0e' size-in-bits='64' id='ee78f675'/>
<pointer-type-def type-id='4051f5e7' size-in-bits='64' id='e01b5462'/>
<qualified-type-def type-id='e01b5462' restrict='yes' id='cc338b26'/>
<pointer-type-def type-id='cd5d79f4' size-in-bits='64' id='5ad9edb6'/>
<function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9b23c9ad'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='cc338b26'/>
<parameter type-id='e1815e87'/>
<parameter type-id='5ad9edb6'/>
<parameter type-id='1b7446cd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_join' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4051f5e7'/>
<parameter type-id='63e171df'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4051f5e7'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fputs' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='e75a27e9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pipe2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7292109c'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_asprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_validate_name' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='find_shares_object' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='ee78f675'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='cd5d79f4'>
<parameter type-id='eaa32e2f'/>
<return type-id='eaa32e2f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_import.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<typedef-decl name='refresh_config_func_t' type-id='29f040d2' id='b7c58eaa'/>
<typedef-decl name='pool_active_func_t' type-id='baa42fef' id='de5d1d8f'/>
<class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='8b092c69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pco_refresh_config' type-id='e7c00489' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pco_pool_active' type-id='9eadf5e0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='pool_config_ops_t' type-id='1a21babe' id='b1e62775'/>
<enum-decl name='pool_state' id='4871ac24'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_STATE_ACTIVE' value='0'/>
<enumerator name='POOL_STATE_EXPORTED' value='1'/>
<enumerator name='POOL_STATE_DESTROYED' value='2'/>
<enumerator name='POOL_STATE_SPARE' value='3'/>
<enumerator name='POOL_STATE_L2CACHE' value='4'/>
<enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
<enumerator name='POOL_STATE_UNAVAIL' value='6'/>
<enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
</enum-decl>
<typedef-decl name='pool_state_t' type-id='4871ac24' id='084a08a3'/>
<class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
<typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
<typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
<typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
<typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
<typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
<typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
<class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
</data-member>
</class-decl>
<qualified-type-def type-id='8b092c69' const='yes' id='1a21babe'/>
<pointer-type-def type-id='de5d1d8f' size-in-bits='64' id='9eadf5e0'/>
<pointer-type-def type-id='084a08a3' size-in-bits='64' id='b9ea57b8'/>
<pointer-type-def type-id='b7c58eaa' size-in-bits='64' id='e7c00489'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<var-decl name='libzfs_config_ops' type-id='b1e62775' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
<function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
<parameter type-id='95e97e5e'/>
<parameter type-id='857bb57e'/>
<parameter type-id='7292109c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pwrite64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='724e4de6'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='__pread64_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='724e4de6'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='fstat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='62f7a03d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_write_conf_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='b9ea57b8' name='state'/>
<parameter type-id='9b23c9ad' name='namestr'/>
<parameter type-id='37e3bd22' name='inuse'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='baa42fef'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='29f040d2'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5ce45b60'/>
<return type-id='5ce45b60'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_iter.c' language='LANG_C99'>
<pointer-type-def type-id='b351119f' size-in-bits='64' id='716943c7'/>
<function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
<parameter type-id='a3681dea'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
<parameter type-id='716943c7'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='95e97e5e'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='make_dataset_handle_zc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='make_dataset_simple_handle_zc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<parameter type-id='e4ec4540'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='make_bookmark_handle' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='simple'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='callback'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted_v2' mangled-name='zfs_iter_snapshots_sorted_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted_v2'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='d8e49ab9' name='callback'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
<parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='80f4b756' name='spec_orig'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapspec_v2' mangled-name='zfs_iter_snapspec_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec_v2'>
<parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='80f4b756' name='spec_orig'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='allowrecursion'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_mount.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='6028cbfe' size-in-bits='256' id='b39b9aa7'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='__dirstream' is-struct='yes' visibility='default' is-declaration-only='yes' id='20cd73f2'/>
<class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='88d1b7f9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tp_forw' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tp_back' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tp_mutex' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='tp_busycv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='tp_workcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='tp_waitcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1600'>
<var-decl name='tp_active' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
<var-decl name='tp_head' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1728'>
<var-decl name='tp_tail' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
<var-decl name='tp_attr' type-id='7d8569fd' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='tp_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2272'>
<var-decl name='tp_linger' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='tp_njobs' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2336'>
<var-decl name='tp_minimum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='tp_maximum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2400'>
<var-decl name='tp_current' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='tp_idle' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='64' id='e4266c7e'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='803dac95'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_handles' type-id='4507922a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cb_alloc' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cb_used' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='get_all_cb_t' type-id='803dac95' id='9b293607'/>
<typedef-decl name='tpool_t' type-id='88d1b7f9' id='b1bbf10d'/>
<typedef-decl name='DIR' type-id='20cd73f2' id='54a5d683'/>
<typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
<typedef-decl name='__compar_fn_t' type-id='585e1de9' id='aba7edd8'/>
<class-decl name='dirent64' size-in-bits='2240' is-struct='yes' visibility='default' id='5725d813'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='d_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='d_off' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='d_reclen' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='144'>
<var-decl name='d_type' type-id='002ac4a6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='152'>
<var-decl name='d_name' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='statfs64' size-in-bits='960' is-struct='yes' visibility='default' id='a2a6be1a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='f_type' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='f_bsize' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='f_blocks' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='f_bfree' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='f_bavail' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='f_files' type-id='0c3a4dde' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='f_ffree' type-id='0c3a4dde' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='f_fsid' type-id='0f35d263' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='f_namelen' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='f_frsize' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='f_flags' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='f_spare' type-id='b39b9aa7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='stat' size-in-bits='1152' is-struct='yes' visibility='default' id='aafc373f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='e43e523d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='dbc43803' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__ino_t' type-id='7359adad' id='e43e523d'/>
<class-decl name='__fsid_t' size-in-bits='64' is-struct='yes' naming-typedef-id='0f35d263' visibility='default' id='ea35c84a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__val' type-id='e4266c7e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__fsid_t' type-id='ea35c84a' id='0f35d263'/>
<typedef-decl name='__blkcnt_t' type-id='bd54fe1a' id='dbc43803'/>
<typedef-decl name='__fsblkcnt64_t' type-id='7359adad' id='95fe1a02'/>
<typedef-decl name='__fsfilcnt64_t' type-id='7359adad' id='0c3a4dde'/>
<typedef-decl name='__fsword_t' type-id='bd54fe1a' id='6028cbfe'/>
<pointer-type-def type-id='54a5d683' size-in-bits='64' id='f09217ba'/>
<pointer-type-def type-id='5725d813' size-in-bits='64' id='07b96073'/>
<pointer-type-def type-id='9b293607' size-in-bits='64' id='77bf1784'/>
<pointer-type-def type-id='7d8569fd' size-in-bits='64' id='7347a39e'/>
<pointer-type-def type-id='aafc373f' size-in-bits='64' id='4330df87'/>
<qualified-type-def type-id='4330df87' restrict='yes' id='73665405'/>
<pointer-type-def type-id='a2a6be1a' size-in-bits='64' id='7fd094c8'/>
<pointer-type-def type-id='b1bbf10d' size-in-bits='64' id='9cf59a50'/>
<pointer-type-def type-id='c5c76c9c' size-in-bits='64' id='b7f9d8e6'/>
<pointer-type-def type-id='9200a744' size-in-bits='64' id='4507922a'/>
<function-decl name='zpool_disable_datasets_os' mangled-name='zpool_disable_datasets_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets_os'>
<parameter type-id='4c81de99'/>
<parameter type-id='c19b74c3'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_disable_volume_os' mangled-name='zpool_disable_volume_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_volume_os'>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_create'>
<parameter type-id='3502e3ff'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='7347a39e'/>
<return type-id='9cf59a50'/>
</function-decl>
<function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_dispatch'>
<parameter type-id='9cf59a50'/>
<parameter type-id='b7f9d8e6'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_destroy'>
<parameter type-id='9cf59a50'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_wait'>
<parameter type-id='9cf59a50'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
<parameter type-id='80f4b756'/>
<parameter type-id='d50d396c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
<parameter type-id='95e97e5e'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9155d4b5'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
<parameter type-id='80f4b756'/>
<parameter type-id='9155d4b5'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
<parameter type-id='80f4b756'/>
<parameter type-id='9155d4b5'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='sa_truncate_shares' mangled-name='sa_truncate_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_truncate_shares'>
<parameter type-id='9155d4b5'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fdopendir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='f09217ba'/>
</function-decl>
<function-decl name='closedir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f09217ba'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='readdir64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f09217ba'/>
<return type-id='07b96073'/>
</function-decl>
<function-decl name='qsort' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='aba7edd8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='rmdir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__openat_too_many_args' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='__openat_missing_mode' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='statfs64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='7fd094c8'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_realloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='changelist_unshare' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d41d328'/>
<parameter type-id='4567bbc9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='do_mount' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='do_unmount' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='options'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_truncate_shares' mangled-name='zfs_truncate_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_truncate_shares'>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='4567bbc9' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
<parameter type-id='77bf1784' name='cbp'/>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4507922a' name='handles'/>
<parameter type-id='b59d7dce' name='num_handles'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='c19b74c3' name='parallel'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_pool.c' language='LANG_C99'>
<type-decl name='long long unsigned int' size-in-bits='64' id='3a47d82b'/>
<class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='dc01bf52'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dryrun' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='import' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='name_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='splitflags_t' type-id='dc01bf52' id='325c1e34'/>
<class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='8ef58008'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fullpool' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='secure' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='wait' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='rate' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='trimflags_t' type-id='8ef58008' id='a093cbb8'/>
<enum-decl name='zpool_status_t' naming-typedef-id='d3dd6294' id='5e770b40'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
<enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
<enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
<enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
<enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
<enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
<enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
<enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
<enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
<enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
<enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
<enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
<enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
<enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
<enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
<enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
<enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
<enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
<enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
<enumerator name='ZPOOL_STATUS_OK' value='32'/>
</enum-decl>
<typedef-decl name='zpool_status_t' type-id='5e770b40' id='d3dd6294'/>
<enum-decl name='zpool_compat_status_t' naming-typedef-id='901b78d1' id='20676925'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
<enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
<enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
</enum-decl>
<typedef-decl name='zpool_compat_status_t' type-id='20676925' id='901b78d1'/>
<enum-decl name='vdev_prop_t' naming-typedef-id='5aa5c90c' id='1573bec8'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_PROP_INVAL' value='-1'/>
<enumerator name='VDEV_PROP_USERPROP' value='-1'/>
<enumerator name='VDEV_PROP_NAME' value='0'/>
<enumerator name='VDEV_PROP_CAPACITY' value='1'/>
<enumerator name='VDEV_PROP_STATE' value='2'/>
<enumerator name='VDEV_PROP_GUID' value='3'/>
<enumerator name='VDEV_PROP_ASIZE' value='4'/>
<enumerator name='VDEV_PROP_PSIZE' value='5'/>
<enumerator name='VDEV_PROP_ASHIFT' value='6'/>
<enumerator name='VDEV_PROP_SIZE' value='7'/>
<enumerator name='VDEV_PROP_FREE' value='8'/>
<enumerator name='VDEV_PROP_ALLOCATED' value='9'/>
<enumerator name='VDEV_PROP_COMMENT' value='10'/>
<enumerator name='VDEV_PROP_EXPANDSZ' value='11'/>
<enumerator name='VDEV_PROP_FRAGMENTATION' value='12'/>
<enumerator name='VDEV_PROP_BOOTSIZE' value='13'/>
<enumerator name='VDEV_PROP_PARITY' value='14'/>
<enumerator name='VDEV_PROP_PATH' value='15'/>
<enumerator name='VDEV_PROP_DEVID' value='16'/>
<enumerator name='VDEV_PROP_PHYS_PATH' value='17'/>
<enumerator name='VDEV_PROP_ENC_PATH' value='18'/>
<enumerator name='VDEV_PROP_FRU' value='19'/>
<enumerator name='VDEV_PROP_PARENT' value='20'/>
<enumerator name='VDEV_PROP_CHILDREN' value='21'/>
<enumerator name='VDEV_PROP_NUMCHILDREN' value='22'/>
<enumerator name='VDEV_PROP_READ_ERRORS' value='23'/>
<enumerator name='VDEV_PROP_WRITE_ERRORS' value='24'/>
<enumerator name='VDEV_PROP_CHECKSUM_ERRORS' value='25'/>
<enumerator name='VDEV_PROP_INITIALIZE_ERRORS' value='26'/>
<enumerator name='VDEV_PROP_OPS_NULL' value='27'/>
<enumerator name='VDEV_PROP_OPS_READ' value='28'/>
<enumerator name='VDEV_PROP_OPS_WRITE' value='29'/>
<enumerator name='VDEV_PROP_OPS_FREE' value='30'/>
<enumerator name='VDEV_PROP_OPS_CLAIM' value='31'/>
<enumerator name='VDEV_PROP_OPS_TRIM' value='32'/>
<enumerator name='VDEV_PROP_BYTES_NULL' value='33'/>
<enumerator name='VDEV_PROP_BYTES_READ' value='34'/>
<enumerator name='VDEV_PROP_BYTES_WRITE' value='35'/>
<enumerator name='VDEV_PROP_BYTES_FREE' value='36'/>
<enumerator name='VDEV_PROP_BYTES_CLAIM' value='37'/>
<enumerator name='VDEV_PROP_BYTES_TRIM' value='38'/>
<enumerator name='VDEV_PROP_REMOVING' value='39'/>
<enumerator name='VDEV_PROP_ALLOCATING' value='40'/>
<enumerator name='VDEV_PROP_FAILFAST' value='41'/>
<enumerator name='VDEV_PROP_CHECKSUM_N' value='42'/>
<enumerator name='VDEV_PROP_CHECKSUM_T' value='43'/>
<enumerator name='VDEV_PROP_IO_N' value='44'/>
<enumerator name='VDEV_PROP_IO_T' value='45'/>
<enumerator name='VDEV_NUM_PROPS' value='46'/>
</enum-decl>
<typedef-decl name='vdev_prop_t' type-id='1573bec8' id='5aa5c90c'/>
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zlp_rewind' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zlp_maxmeta' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zlp_maxdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zlp_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zpool_load_policy_t' type-id='2f65b36f' id='d11b7617'/>
<enum-decl name='vdev_state' id='21566197'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
<enumerator name='VDEV_STATE_CLOSED' value='1'/>
<enumerator name='VDEV_STATE_OFFLINE' value='2'/>
<enumerator name='VDEV_STATE_REMOVED' value='3'/>
<enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
<enumerator name='VDEV_STATE_FAULTED' value='5'/>
<enumerator name='VDEV_STATE_DEGRADED' value='6'/>
<enumerator name='VDEV_STATE_HEALTHY' value='7'/>
</enum-decl>
<typedef-decl name='vdev_state_t' type-id='21566197' id='35acf840'/>
<enum-decl name='vdev_aux' id='7f5bcca4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_AUX_NONE' value='0'/>
<enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
<enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
<enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
<enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
<enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
<enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
<enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
<enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
<enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
<enumerator name='VDEV_AUX_SPARED' value='10'/>
<enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
<enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
<enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
<enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
<enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
<enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
<enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
<enumerator name='VDEV_AUX_ACTIVE' value='18'/>
<enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
<enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
</enum-decl>
<typedef-decl name='vdev_aux_t' type-id='7f5bcca4' id='9d774e0b'/>
<enum-decl name='pool_scan_func' id='1b092565'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCAN_NONE' value='0'/>
<enumerator name='POOL_SCAN_SCRUB' value='1'/>
<enumerator name='POOL_SCAN_RESILVER' value='2'/>
<enumerator name='POOL_SCAN_ERRORSCRUB' value='3'/>
<enumerator name='POOL_SCAN_FUNCS' value='4'/>
</enum-decl>
<typedef-decl name='pool_scan_func_t' type-id='1b092565' id='7313fbe2'/>
<enum-decl name='pool_scrub_cmd' id='a1474cbd'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCRUB_NORMAL' value='0'/>
<enumerator name='POOL_SCRUB_PAUSE' value='1'/>
<enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
</enum-decl>
<typedef-decl name='pool_scrub_cmd_t' type-id='a1474cbd' id='b51cf3c2'/>
<enum-decl name='zpool_errata' id='d9abbf54'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
<enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
<enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
</enum-decl>
<typedef-decl name='zpool_errata_t' type-id='d9abbf54' id='688c495b'/>
<enum-decl name='pool_initialize_func' id='5c246ad4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_INITIALIZE_START' value='0'/>
<enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
<enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
<enumerator name='POOL_INITIALIZE_UNINIT' value='3'/>
<enumerator name='POOL_INITIALIZE_FUNCS' value='4'/>
</enum-decl>
<typedef-decl name='pool_initialize_func_t' type-id='5c246ad4' id='7063e1ab'/>
<enum-decl name='pool_trim_func' id='54ed608a'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_TRIM_START' value='0'/>
<enumerator name='POOL_TRIM_CANCEL' value='1'/>
<enumerator name='POOL_TRIM_SUSPEND' value='2'/>
<enumerator name='POOL_TRIM_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
<enum-decl name='zpool_wait_activity_t' naming-typedef-id='73446457' id='849338e3'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
<enumerator name='ZPOOL_WAIT_FREE' value='1'/>
<enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
<enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
<enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
<enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
<enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
<enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
</enum-decl>
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
<enum-decl name='spa_feature' id='33ecb627'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SPA_FEATURE_NONE' value='-1'/>
<enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
<enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
<enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
<enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
<enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
<enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
<enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
<enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
<enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
<enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
<enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
<enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
<enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
<enumerator name='SPA_FEATURE_SHA512' value='13'/>
<enumerator name='SPA_FEATURE_SKEIN' value='14'/>
<enumerator name='SPA_FEATURE_EDONR' value='15'/>
<enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
<enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
<enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
<enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
<enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
<enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
<enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
<enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
<enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
<enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
<enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
<enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
<enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
<enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
<enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
<enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
<enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
<enumerator name='SPA_FEATURE_DRAID' value='33'/>
<enumerator name='SPA_FEATURE_ZILSAXATTR' value='34'/>
<enumerator name='SPA_FEATURE_HEAD_ERRLOG' value='35'/>
<enumerator name='SPA_FEATURE_BLAKE3' value='36'/>
<enumerator name='SPA_FEATURE_BLOCK_CLONING' value='37'/>
<enumerator name='SPA_FEATURE_AVZ_V2' value='38'/>
<enumerator name='SPA_FEATURES' value='39'/>
</enum-decl>
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
<qualified-type-def type-id='22cce67b' const='yes' id='d2816df0'/>
<pointer-type-def type-id='d2816df0' size-in-bits='64' id='3bbfee2e'/>
<qualified-type-def type-id='b96825af' const='yes' id='2b61797f'/>
<pointer-type-def type-id='2b61797f' size-in-bits='64' id='9f7200cf'/>
<pointer-type-def type-id='d6618c78' size-in-bits='64' id='a8425263'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
<pointer-type-def type-id='a093cbb8' size-in-bits='64' id='b13f38c3'/>
<pointer-type-def type-id='35acf840' size-in-bits='64' id='17f3480d'/>
<pointer-type-def type-id='688c495b' size-in-bits='64' id='cec6f2e4'/>
<pointer-type-def type-id='d11b7617' size-in-bits='64' id='23432aaa'/>
<function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
<parameter type-id='4c81de99'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
<parameter type-id='5d0c23fb'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_to_name' mangled-name='vdev_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_to_name'>
<parameter type-id='5aa5c90c'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_user' mangled-name='vdev_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_user'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
<parameter type-id='4c81de99'/>
<parameter type-id='7d3cd834'/>
<parameter type-id='cec6f2e4'/>
<return type-id='d3dd6294'/>
</function-decl>
<function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
<parameter type-id='5d0c23fb'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
<parameter type-id='5d0c23fb'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='lzc_initialize' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='7063e1ab'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_trim' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b1146b8d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_sync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_reopen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='73446457'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='73446457'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='22cce67b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_vdev_prop' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_set_vdev_prop' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
<parameter type-id='80f4b756'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
<parameter type-id='26a90f95'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='75be733c'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
<parameter type-id='80f4b756'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
<parameter type-id='80f4b756'/>
<return type-id='5d0c23fb'/>
</function-decl>
<function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
<parameter type-id='5d0c23fb'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
<parameter type-id='5d0c23fb'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
<parameter type-id='5d0c23fb'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vdev_name_to_prop' mangled-name='vdev_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_name_to_prop'>
<parameter type-id='80f4b756'/>
<return type-id='5aa5c90c'/>
</function-decl>
<function-decl name='vdev_prop_default_string' mangled-name='vdev_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_default_string'>
<parameter type-id='5aa5c90c'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_default_numeric' mangled-name='vdev_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_default_numeric'>
<parameter type-id='5aa5c90c'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='vdev_prop_readonly' mangled-name='vdev_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_readonly'>
<parameter type-id='5aa5c90c'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_index_to_string' mangled-name='vdev_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_index_to_string'>
<parameter type-id='5aa5c90c'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_vdev' mangled-name='zpool_prop_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_vdev'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='3fa542f0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9f7200cf'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='3bbfee2e'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9da381c4'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='3bbfee2e'/>
<parameter type-id='3502e3ff'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='4dd26a40'/>
<return type-id='5d6479ae'/>
</function-decl>
<function-decl name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='9da381c4'/>
</function-decl>
<function-decl name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
<parameter type-id='80f4b756'/>
<parameter type-id='a8425263'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
<parameter type-id='80f4b756'/>
<parameter type-id='a8425263'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
<parameter type-id='5ce45b60'/>
<parameter type-id='23432aaa'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
<parameter type-id='5d0c23fb'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='vdev_prop_get_type' mangled-name='vdev_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_get_type'>
<parameter type-id='5aa5c90c'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
<return type-id='7359adad'/>
</function-decl>
<function-decl name='strtoull' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='3a47d82b'/>
</function-decl>
<function-decl name='memcmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='__realpath_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='266fe297'/>
<parameter type-id='b59d7dce'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='munmap' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='stat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='f1cadedf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_standard_error' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_relabel_disk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
<parameter type-id='35acf840' name='state'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
<parameter type-id='084a08a3' name='state'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_userprop' mangled-name='zpool_get_userprop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_userprop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vdev_expand_proplist' mangled-name='vdev_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_expand_proplist'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='e4378506' name='plp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='fsprops'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='95e97e5e' name='reason'/>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='26a90f95' name='altroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='b1146b8d' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<parameter type-id='b13f38c3' name='trim_flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7313fbe2' name='func'/>
<parameter type-id='b51cf3c2' name='cmd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='ppath'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='17f3480d' name='newstate'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='istmp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove_wanted' mangled-name='zpool_vdev_remove_wanted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_wanted'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='zpool_vdev_set_removed_state' mangled-name='zpool_vdev_set_removed_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_set_removed_state'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='guid'/>
+ <parameter type-id='9d774e0b' name='aux'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='old_disk'/>
<parameter type-id='80f4b756' name='new_disk'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='95e97e5e' name='replacing'/>
<parameter type-id='c19b74c3' name='rebuild'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='26a90f95' name='newname'/>
<parameter type-id='857bb57e' name='newroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='325c1e34' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5d6479ae' name='sizep'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5ce45b60' name='rewindnvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nv'/>
<parameter type-id='95e97e5e' name='name_flags'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nverrlistp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='new_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
<parameter type-id='95e97e5e' name='argc'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='26a90f95' name='string'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='message'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvhisp'/>
<parameter type-id='5d6479ae' name='off'/>
<parameter type-id='37e3bd22' name='eof'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='857bb57e' name='nvp'/>
<parameter type-id='7292109c' name='dropped'/>
<parameter type-id='f0981eeb' name='flags'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='7292109c' name='count'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9c313c2d' name='eid'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='22cce67b' name='envmap'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvlp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
<parameter type-id='80f4b756' name='compat'/>
<parameter type-id='37e3bd22' name='features'/>
<parameter type-id='26a90f95' name='report'/>
<parameter type-id='b59d7dce' name='rlen'/>
<return type-id='901b78d1'/>
</function-decl>
<function-decl name='zpool_get_vdev_prop_value' mangled-name='zpool_get_vdev_prop_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_vdev_prop_value'>
<parameter type-id='5ce45b60' name='nvprop'/>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='26a90f95' name='prop_name'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_vdev_prop' mangled-name='zpool_get_vdev_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_vdev_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='26a90f95' name='prop_name'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='len'/>
<parameter type-id='debc6aa3' name='srctype'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_all_vdev_props' mangled-name='zpool_get_all_vdev_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_all_vdev_props'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='857bb57e' name='outnvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_vdev_prop' mangled-name='zpool_set_vdev_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_vdev_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='vdevname'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_sendrecv.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='2176' id='8c2bcad1'>
<subrange length='34' type-id='7359adad' id='6a6a7e00'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='256' id='85c64d26'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='96' id='fa8ef949'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='128' id='fa9986a5'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='40' id='0f4ddd0b'>
<subrange length='5' type-id='7359adad' id='53010e10'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='48' id='0f562bd0'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='64' id='13339fda'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<class-decl name='sendflags' size-in-bits='576' is-struct='yes' visibility='default' id='f6aa15be'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbosity' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='replicate' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='skipmissing' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='doall' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fromorigin' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='pad' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='props' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='parsable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='progress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='progressastitle' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='largeblock' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='embed_data' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='compress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='raw' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='backup' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='544'>
<var-decl name='saved' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sendflags_t' type-id='f6aa15be' id='945467e6'/>
<typedef-decl name='snapfilter_cb_t' type-id='d2a5e211' id='3d3ffb69'/>
<class-decl name='recvflags' size-in-bits='448' is-struct='yes' visibility='default' id='34a384dc'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbose' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='isprefix' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='istail' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='force' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='canmountoff' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='resumable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='byteswap' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='nomount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='skipholds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='domount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='forceunmount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='heal' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='recvflags_t' type-id='34a384dc' id='9e59d1d4'/>
<enum-decl name='lzc_send_flags' id='bfbd3c8e'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
<enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
<enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
<enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
<enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
</enum-decl>
<class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='e0a4a1cb'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ddk_cksum' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='ddk_prop' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_key_t' type-id='e0a4a1cb' id='67f6d2cf'/>
<enum-decl name='dmu_object_type' id='04b3b0b9'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DMU_OT_NONE' value='0'/>
<enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
<enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
<enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
<enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
<enumerator name='DMU_OT_BPOBJ' value='5'/>
<enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
<enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
<enumerator name='DMU_OT_SPACE_MAP' value='8'/>
<enumerator name='DMU_OT_INTENT_LOG' value='9'/>
<enumerator name='DMU_OT_DNODE' value='10'/>
<enumerator name='DMU_OT_OBJSET' value='11'/>
<enumerator name='DMU_OT_DSL_DIR' value='12'/>
<enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
<enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
<enumerator name='DMU_OT_DSL_PROPS' value='15'/>
<enumerator name='DMU_OT_DSL_DATASET' value='16'/>
<enumerator name='DMU_OT_ZNODE' value='17'/>
<enumerator name='DMU_OT_OLDACL' value='18'/>
<enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
<enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
<enumerator name='DMU_OT_MASTER_NODE' value='21'/>
<enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
<enumerator name='DMU_OT_ZVOL' value='23'/>
<enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
<enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
<enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
<enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
<enumerator name='DMU_OT_ERROR_LOG' value='28'/>
<enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
<enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
<enumerator name='DMU_OT_POOL_PROPS' value='31'/>
<enumerator name='DMU_OT_DSL_PERMS' value='32'/>
<enumerator name='DMU_OT_ACL' value='33'/>
<enumerator name='DMU_OT_SYSACL' value='34'/>
<enumerator name='DMU_OT_FUID' value='35'/>
<enumerator name='DMU_OT_FUID_SIZE' value='36'/>
<enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
<enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
<enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
<enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
<enumerator name='DMU_OT_USERREFS' value='41'/>
<enumerator name='DMU_OT_DDT_ZAP' value='42'/>
<enumerator name='DMU_OT_DDT_STATS' value='43'/>
<enumerator name='DMU_OT_SA' value='44'/>
<enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
<enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
<enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
<enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
<enumerator name='DMU_OT_DEDUP' value='49'/>
<enumerator name='DMU_OT_DEADLIST' value='50'/>
<enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
<enumerator name='DMU_OT_DSL_CLONES' value='52'/>
<enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
<enumerator name='DMU_OT_NUMTYPES' value='54'/>
<enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
<enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
<enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
<enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
<enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
<enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
<enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
<enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
<enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
<enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
<enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
<enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
<enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
<enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
<enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
<enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
<enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
<enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
<enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
<enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
</enum-decl>
<typedef-decl name='dmu_object_type_t' type-id='04b3b0b9' id='5c9d8906'/>
<class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='1d53e28b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_word' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_cksum_t' type-id='1d53e28b' id='39730d0b'/>
<class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='781a52d7'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_type' type-id='08f5ca17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='drr_payloadlen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_u' type-id='ac5ab598' visibility='default'/>
</data-member>
</class-decl>
<enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DRR_BEGIN' value='0'/>
<enumerator name='DRR_OBJECT' value='1'/>
<enumerator name='DRR_FREEOBJECTS' value='2'/>
<enumerator name='DRR_WRITE' value='3'/>
<enumerator name='DRR_FREE' value='4'/>
<enumerator name='DRR_END' value='5'/>
<enumerator name='DRR_WRITE_BYREF' value='6'/>
<enumerator name='DRR_SPILL' value='7'/>
<enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
<enumerator name='DRR_OBJECT_RANGE' value='9'/>
<enumerator name='DRR_REDACT' value='10'/>
<enumerator name='DRR_NUMTYPES' value='11'/>
</enum-decl>
<union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='ac5ab598'>
<data-member access='public'>
<var-decl name='drr_begin' type-id='09fcdc01' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_end' type-id='6ee25631' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_object' type-id='f9ad530b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_freeobjects' type-id='a27d958e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write' type-id='4cc69e4b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_free' type-id='c836cfd2' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write_byref' type-id='e511cdce' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_spill' type-id='1e69a80a' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write_embedded' type-id='98b1345e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_object_range' type-id='aba1f9e1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_redact' type-id='50389039' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_checksum' type-id='a5fe3647' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='6ee25631'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='f9ad530b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='drr_bonustype' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_blksz' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='drr_bonuslen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
<var-decl name='drr_compress' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
<var-decl name='drr_dn_slots' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='216'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='drr_raw_bonuslen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_indblkshift' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
<var-decl name='drr_nlevels' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
<var-decl name='drr_nblkptr' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
<var-decl name='drr_pad' type-id='0f4ddd0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_maxblkid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='a27d958e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_numobjs' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='4cc69e4b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='drr_pad' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_logical_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
<var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
<var-decl name='drr_pad2' type-id='0f4ddd0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='c836cfd2'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='e511cdce'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_refguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_refobject' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_refoffset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='456'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='464'>
<var-decl name='drr_pad2' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='1e69a80a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
<var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
<var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='608'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='98b1345e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_compression' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
<var-decl name='drr_etype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
<var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_lsize' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='drr_psize' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='aba1f9e1'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_numslots' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='488'>
<var-decl name='drr_pad' type-id='d3490169' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='50389039'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='a5fe3647'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_pad' type-id='8c2bcad1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__clockid_t' type-id='95e97e5e' id='08f9a87a'/>
<typedef-decl name='clockid_t' type-id='08f9a87a' id='a1c3b834'/>
<typedef-decl name='Byte' type-id='002ac4a6' id='efb9ba06'/>
<typedef-decl name='uLong' type-id='7359adad' id='5bbcce85'/>
<typedef-decl name='Bytef' type-id='efb9ba06' id='c1606520'/>
<typedef-decl name='uLongf' type-id='5bbcce85' id='4d39af59'/>
<pointer-type-def type-id='c1606520' size-in-bits='64' id='4c667223'/>
<qualified-type-def type-id='c1606520' const='yes' id='a6124a50'/>
<pointer-type-def type-id='a6124a50' size-in-bits='64' id='e8cb3e0e'/>
<qualified-type-def type-id='781a52d7' const='yes' id='413ab2b8'/>
<pointer-type-def type-id='413ab2b8' size-in-bits='64' id='41671bd6'/>
<pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
<pointer-type-def type-id='3ff5601b' size-in-bits='64' id='4aafb922'/>
<pointer-type-def type-id='9e59d1d4' size-in-bits='64' id='4ea84b4f'/>
<pointer-type-def type-id='945467e6' size-in-bits='64' id='8def7735'/>
<pointer-type-def type-id='3d3ffb69' size-in-bits='64' id='72a26210'/>
<pointer-type-def type-id='c9d12d66' size-in-bits='64' id='b2eb2c3f'/>
<pointer-type-def type-id='a9c79a1f' size-in-bits='64' id='3d83ba87'/>
<pointer-type-def type-id='4d39af59' size-in-bits='64' id='60db3356'/>
<pointer-type-def type-id='39730d0b' size-in-bits='64' id='c24fc2ee'/>
<function-decl name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
<parameter type-id='fcd57163'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='lzc_send_wrapper' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='2e711a2a'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='41671bd6'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_receive_with_heal' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='41671bd6'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_space' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_rename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_setproctitle' mangled-name='zfs_setproctitle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_setproctitle'>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='fba6cb51'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<parameter type-id='4aafb922'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_size' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='c24fc2ee'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='perror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strndup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='time' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b2eb2c3f'/>
<return type-id='c9d12d66'/>
</function-decl>
<function-decl name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a1c3b834'/>
<parameter type-id='3d83ba87'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='write' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='sleep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f0981eeb'/>
<return type-id='f0981eeb'/>
</function-decl>
<function-decl name='uncompress' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4c667223'/>
<parameter type-id='60db3356'/>
<parameter type-id='e8cb3e0e'/>
<parameter type-id='5bbcce85'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='create_parents' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<parameter type-id='26a90f95'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='5d6479ae' name='bytes_written'/>
<parameter type-id='5d6479ae' name='blocks_visited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='token'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='72a26210' name='filter_func'/>
<parameter type-id='eaa32e2f' name='cb_arg'/>
<parameter type-id='857bb57e' name='debugnvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='from'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='80f4b756' name='redactbook'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='4ea84b4f' name='flags'/>
<parameter type-id='95e97e5e' name='infd'/>
<parameter type-id='a3681dea' name='stream_avl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='c70fa2e8'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='d2a5e211'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='c19b74c3'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_status.c' language='LANG_C99'>
<function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='7d3cd834' name='msgid'/>
<parameter type-id='cec6f2e4' name='errata'/>
<return type-id='d3dd6294'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_util.c' language='LANG_C99'>
<class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='d5027220'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='gp_offset' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='fp_offset' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='overflow_arg_area' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='reg_save_area' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<type-decl name='double' size-in-bits='64' id='a0eb0f08'/>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='192' id='e41bdf22'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<array-type-def dimensions='1' type-id='19cefcee' size-in-bits='160' alignment-in-bits='32' id='3fcf57d2'>
<subrange length='5' type-id='7359adad' id='53010e10'/>
</array-type-def>
<enum-decl name='zfs_get_column_t' naming-typedef-id='19cefcee' id='223bdcaa'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='GET_COL_NONE' value='0'/>
<enumerator name='GET_COL_NAME' value='1'/>
<enumerator name='GET_COL_PROPERTY' value='2'/>
<enumerator name='GET_COL_VALUE' value='3'/>
<enumerator name='GET_COL_RECVD' value='4'/>
<enumerator name='GET_COL_SOURCE' value='5'/>
</enum-decl>
<typedef-decl name='zfs_get_column_t' type-id='223bdcaa' id='19cefcee'/>
<class-decl name='vdev_cbdata' size-in-bits='192' is-struct='yes' visibility='default' id='b8006be8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_name_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cb_names' type-id='9b23c9ad' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cb_names_count' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='vdev_cbdata_t' type-id='b8006be8' id='a9679c94'/>
<class-decl name='zprop_get_cbdata' size-in-bits='832' is-struct='yes' visibility='default' id='f3d3c319'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_sources' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='cb_columns' type-id='3fcf57d2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='cb_colwidths' type-id='e41bdf22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='cb_scripted' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='cb_literal' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='cb_first' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='cb_proplist' type-id='3a9b2288' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='cb_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='cb_vdevs' type-id='a9679c94' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_get_cbdata_t' type-id='f3d3c319' id='f3d87113'/>
<typedef-decl name='zprop_func' type-id='2e711a2a' id='1ec3747a'/>
<enum-decl name='zprop_attr_t' naming-typedef-id='999701cc' id='77d05200'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_DEFAULT' value='0'/>
<enumerator name='PROP_READONLY' value='1'/>
<enumerator name='PROP_INHERIT' value='2'/>
<enumerator name='PROP_ONETIME' value='3'/>
<enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
</enum-decl>
<typedef-decl name='zprop_attr_t' type-id='77d05200' id='999701cc'/>
<class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='87957af9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pi_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pi_value' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_index_t' type-id='87957af9' id='64636ce3'/>
<class-decl name='zprop_desc_t' size-in-bits='640' is-struct='yes' naming-typedef-id='ffa52b96' visibility='default' id='bbff5e4b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pd_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pd_propnum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='pd_proptype' type-id='31429eff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pd_strdefault' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pd_numdefault' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pd_attr' type-id='999701cc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='pd_types' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pd_values' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pd_colname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='pd_rightalign' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='449'>
<var-decl name='pd_visible' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='450'>
<var-decl name='pd_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='451'>
<var-decl name='pd_always_flex' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='pd_table' type-id='c8bc397b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='pd_table_size' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_desc_t' type-id='bbff5e4b' id='ffa52b96'/>
<class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='d5027220' size-in-bits='64' id='b7f2d5e6'/>
<qualified-type-def type-id='26a90f95' const='yes' id='57de658a'/>
<pointer-type-def type-id='57de658a' size-in-bits='64' id='f319fae0'/>
<pointer-type-def type-id='9b23c9ad' size-in-bits='64' id='c0563f85'/>
<qualified-type-def type-id='33f57a65' const='yes' id='21fd6035'/>
<pointer-type-def type-id='21fd6035' size-in-bits='64' id='a0de50cd'/>
<pointer-type-def type-id='a0de50cd' size-in-bits='64' id='24f95ba5'/>
<qualified-type-def type-id='64636ce3' const='yes' id='072f7953'/>
<pointer-type-def type-id='072f7953' size-in-bits='64' id='c8bc397b'/>
<pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
<pointer-type-def type-id='aca3bac8' size-in-bits='64' id='d33f11cb'/>
<qualified-type-def type-id='d33f11cb' restrict='yes' id='5c53ba29'/>
<pointer-type-def type-id='ffa52b96' size-in-bits='64' id='76c8174b'/>
<pointer-type-def type-id='f3d87113' size-in-bits='64' id='0d2a0670'/>
+ <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
+ <parameter type-id='b0382bb3'/>
+ <parameter type-id='4c81de99'/>
+ <parameter type-id='80f4b756'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
+ <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
+ <parameter type-id='80f4b756'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
<function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_feature_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='vdev_prop_init' mangled-name='vdev_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
<parameter type-id='1ec3747a'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
<parameter type-id='80f4b756'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
<parameter type-id='95e97e5e'/>
<parameter type-id='37e3bd22'/>
<parameter type-id='2e45de5d'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
<parameter type-id='80f4b756'/>
<parameter type-id='394fc496'/>
<parameter type-id='62f7a03d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__ctype_toupper_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='24f95ba5'/>
</function-decl>
<function-decl name='dlclose' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='regcomp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5c53ba29'/>
<parameter type-id='9d26089a'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='regfree' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='d33f11cb'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='puts' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strtod' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<return type-id='a0eb0f08'/>
</function-decl>
<function-decl name='realloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='exit' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strnlen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='95e97e5e'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='dup2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execve' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execvp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execvpe' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_exit' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fork' visibility='default' binding='global' size-in-bits='64'>
<return type-id='3629bad8'/>
</function-decl>
<function-decl name='pow' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a0eb0f08'/>
<parameter type-id='a0eb0f08'/>
<return type-id='a0eb0f08'/>
</function-decl>
<function-decl name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b7f2d5e6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__vasprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b7f2d5e6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='waitpid' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3629bad8'/>
<parameter type-id='7292109c'/>
<parameter type-id='95e97e5e'/>
<return type-id='3629bad8'/>
</function-decl>
<function-decl name='namespace_clear' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b0382bb3'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_load_module' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='printerr'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
<parameter type-id='9b23c9ad' name='strs'/>
<parameter type-id='95e97e5e' name='count'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='argtype'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='0d2a0670' name='cbp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='a2256d42' name='sourcetype'/>
<parameter type-id='80f4b756' name='source'/>
<parameter type-id='80f4b756' name='recvd_value'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='props'/>
<parameter type-id='e4378506' name='listp'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
<parameter type-id='3a9b2288' name='pl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
<parameter type-id='1ec3747a' name='func'/>
<parameter type-id='eaa32e2f' name='cb'/>
<parameter type-id='c19b74c3' name='show_all'/>
<parameter type-id='c19b74c3' name='ordered'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='use_color' mangled-name='use_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='use_color'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
<parameter type-id='80f4b756' name='color'/>
<parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='zpool_vdev_script_alloc_env' mangled-name='zpool_vdev_script_alloc_env' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_script_alloc_env'>
+ <parameter type-id='80f4b756' name='pool_name'/>
+ <parameter type-id='80f4b756' name='vdev_path'/>
+ <parameter type-id='80f4b756' name='vdev_upath'/>
+ <parameter type-id='80f4b756' name='vdev_enc_sysfs_path'/>
+ <parameter type-id='80f4b756' name='opt_key'/>
+ <parameter type-id='80f4b756' name='opt_val'/>
+ <return type-id='9b23c9ad'/>
+ </function-decl>
+ <function-decl name='zpool_vdev_script_free_env' mangled-name='zpool_vdev_script_free_env' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_script_free_env'>
+ <parameter type-id='9b23c9ad' name='env'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zpool_prepare_disk' mangled-name='zpool_prepare_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prepare_disk'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='5ce45b60' name='vdev_nv'/>
+ <parameter type-id='80f4b756' name='prepare_str'/>
+ <parameter type-id='c0563f85' name='lines'/>
+ <parameter type-id='7292109c' name='lines_cnt'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zpool_prepare_and_label_disk' mangled-name='zpool_prepare_and_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prepare_and_label_disk'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5ce45b60' name='vdev_nv'/>
+ <parameter type-id='80f4b756' name='prepare_str'/>
+ <parameter type-id='c0563f85' name='lines'/>
+ <parameter type-id='7292109c' name='lines_cnt'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_mount_os.c' language='LANG_C99'>
<pointer-type-def type-id='7359adad' size-in-bits='64' id='1d2c2b85'/>
<function-decl name='geteuid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='cc5fcceb'/>
</function-decl>
<function-decl name='mount' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='7359adad'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='umount2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
<parameter type-id='80f4b756' name='mntopts'/>
<parameter type-id='1d2c2b85' name='mntflags'/>
<parameter type-id='1d2c2b85' name='zfsflags'/>
<parameter type-id='95e97e5e' name='sloppy'/>
<parameter type-id='26a90f95' name='badopt'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mntpoint'/>
<parameter type-id='26a90f95' name='mntopts'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_pool_os.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='288' id='16e6f2c6'>
<subrange length='36' type-id='7359adad' id='ae666bde'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a65ae39c' size-in-bits='960' id='fa198beb'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='384' id='dba89ba3'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='256' id='01d84ed4'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='a65ae39c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='p_start' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='p_size' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='p_guid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='p_tag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
<var-decl name='p_flag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='p_name' type-id='16e6f2c6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='p_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='p_resv' type-id='01d84ed4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='dd4a2e5a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='efi_version' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='efi_nparts' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='efi_part_size' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='efi_lbasize' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='efi_last_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='efi_first_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='efi_last_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='efi_disk_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='efi_flags' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='efi_reserved1' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='efi_altern_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='efi_reserved' type-id='dba89ba3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='efi_parts' type-id='fa198beb' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='214f32ea'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='time_low' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='time_mid' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='time_hi_and_version' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='clock_seq_hi_and_reserved' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='72'>
<var-decl name='clock_seq_low' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='80'>
<var-decl name='node_addr' type-id='0f562bd0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ushort_t' type-id='8efea9e5' id='d908a348'/>
<typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
<typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
<pointer-type-def type-id='dd4a2e5a' size-in-bits='64' id='0d8119a8'/>
<pointer-type-def type-id='0d8119a8' size-in-bits='64' id='c43b27a6'/>
<function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
<parameter type-id='95e97e5e'/>
<parameter type-id='8f92235e'/>
<parameter type-id='c43b27a6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
<parameter type-id='95e97e5e'/>
<parameter type-id='c43b27a6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
<parameter type-id='95e97e5e'/>
<parameter type-id='0d8119a8'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
<parameter type-id='0d8119a8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='rand' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fsync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
- <parameter type-id='b0382bb3' name='hdl'/>
- <parameter type-id='4c81de99' name='zhp'/>
- <parameter type-id='80f4b756' name='name'/>
- <return type-id='95e97e5e'/>
- </function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_util_os.c' language='LANG_C99'>
<class-decl name='itimerspec' size-in-bits='256' is-struct='yes' visibility='default' id='acbdbcc6'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='it_interval' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='it_value' type-id='a9c79a1f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nfds_t' type-id='7359adad' id='555eef66'/>
<class-decl name='pollfd' size-in-bits='64' is-struct='yes' visibility='default' id='b440e872'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='events' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='revents' type-id='a2185560' visibility='default'/>
</data-member>
</class-decl>
<qualified-type-def type-id='acbdbcc6' const='yes' id='4ba62af7'/>
<pointer-type-def type-id='4ba62af7' size-in-bits='64' id='f39579e7'/>
<pointer-type-def type-id='acbdbcc6' size-in-bits='64' id='116842ac'/>
<pointer-type-def type-id='b440e872' size-in-bits='64' id='3ac36db0'/>
- <function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='80f4b756'/>
- <parameter type-id='95e97e5e'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='__poll_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3ac36db0'/>
<parameter type-id='555eef66'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='7359adad'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='inotify_init1' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='inotify_add_watch' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='8f92235e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='timerfd_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='08f9a87a'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='timerfd_settime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='f39579e7'/>
<parameter type-id='116842ac'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
<parameter type-id='95e97e5e' name='error'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_userns' mangled-name='zfs_userns' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userns'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='nspath'/>
<parameter type-id='95e97e5e' name='attach'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/os/linux/zutil_device_path_os.c' language='LANG_C99'>
<class-decl name='udev' is-struct='yes' visibility='default' is-declaration-only='yes' id='e4a7fb7f'/>
<class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='640b33ca'/>
<pointer-type-def type-id='e4a7fb7f' size-in-bits='64' id='025eefe7'/>
<pointer-type-def type-id='640b33ca' size-in-bits='64' id='b32bae08'/>
<class-decl name='udev' is-struct='yes' visibility='default' is-declaration-only='yes' id='e4a7fb7f'/>
<class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='640b33ca'/>
<function-decl name='udev_new' visibility='default' binding='global' size-in-bits='64'>
<return type-id='025eefe7'/>
</function-decl>
<function-decl name='udev_device_unref' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b32bae08'/>
<return type-id='b32bae08'/>
</function-decl>
<function-decl name='udev_device_new_from_subsystem_sysname' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='025eefe7'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='b32bae08'/>
</function-decl>
<function-decl name='udev_device_get_property_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b32bae08'/>
<parameter type-id='80f4b756'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='__readlink_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='266fe297'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
<parameter type-id='80f4b756' name='dev_name'/>
<return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
- <parameter type-id='80f4b756' name='dev_name'/>
- <return type-id='26a90f95'/>
- </function-decl>
<function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/os/linux/zutil_import_os.c' language='LANG_C99'>
<class-decl name='blkid_struct_cache' is-struct='yes' visibility='default' is-declaration-only='yes' id='09286066'/>
<class-decl name='blkid_struct_dev' is-struct='yes' visibility='default' is-declaration-only='yes' id='86223623'/>
<class-decl name='blkid_struct_dev_iterate' is-struct='yes' visibility='default' is-declaration-only='yes' id='d88420d6'/>
<class-decl name='udev_list_entry' is-struct='yes' visibility='default' is-declaration-only='yes' id='e7dbdca3'/>
<typedef-decl name='pool_vdev_iter_f' type-id='6c16a6c8' id='dff793e0'/>
<typedef-decl name='blkid_dev' type-id='8433f053' id='f47b023a'/>
<typedef-decl name='blkid_cache' type-id='940e3afc' id='0882dfdf'/>
<typedef-decl name='blkid_dev_iterate' type-id='b8fa2efc' id='f4760fa7'/>
<typedef-decl name='__useconds_t' type-id='f0981eeb' id='4e80d4b1'/>
<pointer-type-def type-id='0882dfdf' size-in-bits='64' id='2e3e7caa'/>
<pointer-type-def type-id='f47b023a' size-in-bits='64' id='d87f9b75'/>
<pointer-type-def type-id='09286066' size-in-bits='64' id='940e3afc'/>
<pointer-type-def type-id='86223623' size-in-bits='64' id='8433f053'/>
<pointer-type-def type-id='d88420d6' size-in-bits='64' id='b8fa2efc'/>
<pointer-type-def type-id='2ec2411e' size-in-bits='64' id='6c16a6c8'/>
<pointer-type-def type-id='e7dbdca3' size-in-bits='64' id='deabd0d3'/>
<class-decl name='blkid_struct_cache' is-struct='yes' visibility='default' is-declaration-only='yes' id='09286066'/>
<class-decl name='blkid_struct_dev' is-struct='yes' visibility='default' is-declaration-only='yes' id='86223623'/>
<class-decl name='blkid_struct_dev_iterate' is-struct='yes' visibility='default' is-declaration-only='yes' id='d88420d6'/>
<class-decl name='udev_list_entry' is-struct='yes' visibility='default' is-declaration-only='yes' id='e7dbdca3'/>
<function-decl name='for_each_vdev_in_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='dff793e0'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='label_paths' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5507783b'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='7d3cd834'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zutil_alloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5507783b'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='zutil_strdup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5507783b'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='slice_cache_compare' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='blkid_put_cache' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0882dfdf'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='blkid_get_cache' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='2e3e7caa'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='blkid_dev_devname' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f47b023a'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='blkid_dev_iterate_begin' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0882dfdf'/>
<return type-id='f4760fa7'/>
</function-decl>
<function-decl name='blkid_dev_set_search' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f4760fa7'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='blkid_dev_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f4760fa7'/>
<parameter type-id='d87f9b75'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='blkid_dev_iterate_end' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f4760fa7'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='blkid_probe_all_new' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0882dfdf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='udev_unref' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='025eefe7'/>
<return type-id='025eefe7'/>
</function-decl>
<function-decl name='udev_list_entry_get_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='deabd0d3'/>
<return type-id='deabd0d3'/>
</function-decl>
<function-decl name='udev_list_entry_get_name' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='deabd0d3'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='udev_device_get_parent_with_subsystem_devtype' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b32bae08'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='b32bae08'/>
</function-decl>
<function-decl name='udev_device_get_devlinks_list_entry' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b32bae08'/>
<return type-id='deabd0d3'/>
</function-decl>
<function-decl name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='usleep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4e80d4b1'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
<parameter type-id='b32bae08' name='dev'/>
<parameter type-id='26a90f95' name='bufptr'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
<parameter type-id='b32bae08' name='dev'/>
<parameter type-id='26a90f95' name='bufptr'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='zpool_disk_wait' mangled-name='zpool_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disk_wait'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='update_vdev_config_dev_sysfs_path' mangled-name='update_vdev_config_dev_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_sysfs_path'>
+ <parameter type-id='5ce45b60' name='nv'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='80f4b756' name='key'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-type size-in-bits='64' id='2ec2411e'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/os/linux/zutil_setproctitle.c' language='LANG_C99'>
<function-decl name='warnx' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='setenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='clearenv' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='zfs_setproctitle_init' mangled-name='zfs_setproctitle_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_setproctitle_init'>
<parameter type-id='95e97e5e' name='argc'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='envp'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_device_path.c' language='LANG_C99'>
<pointer-type-def type-id='b99c00c9' size-in-bits='64' id='13956559'/>
<function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
<parameter type-id='78c01427'/>
<return type-id='13956559'/>
</function-decl>
<function-decl name='strspn' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='79a0948f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_import.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='256' id='16dc656a'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='384' id='73b82f0f'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='7ac83801'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='path' type-id='9b23c9ad' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='paths' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='poolname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='cachefile' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='can_be_active' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='scan' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='policy' type-id='5ce45b60' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='importargs_t' type-id='7ac83801' id='7a842a6b'/>
<class-decl name='libpc_handle' size-in-bits='8448' is-struct='yes' visibility='default' id='7c8737f0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='lpc_error' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='lpc_printerr' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='lpc_open_access_error' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='lpc_desc_active' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='lpc_desc' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8320'>
<var-decl name='lpc_ops' type-id='f095e320' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8384'>
<var-decl name='lpc_lib_handle' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='libpc_handle_t' type-id='7c8737f0' id='8a70a786'/>
<class-decl name='aiocb' size-in-bits='1344' is-struct='yes' visibility='default' id='e4957c49'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='aio_fildes' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='aio_lio_opcode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='aio_reqprio' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='aio_buf' type-id='fe09dd29' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='aio_nbytes' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='aio_sigevent' type-id='519bc206' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='__next_prio' type-id='924bbc81' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='__abs_prio' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='864'>
<var-decl name='__policy' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='__error_code' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__return_value' type-id='41060289' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
<var-decl name='aio_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='__glibc_reserved' type-id='16dc656a' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='sigevent' size-in-bits='512' is-struct='yes' visibility='default' id='519bc206'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='sigev_value' type-id='eabacd01' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sigev_signo' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='sigev_notify' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_sigev_un' type-id='ac5ab599' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='ac5ab599'>
<data-member access='public'>
<var-decl name='_pad' type-id='73b82f0f' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_tid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigev_thread' type-id='e7f43f7b' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f7b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_function' type-id='5f147c28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_attribute' type-id='7347a39e' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='e4957c49' size-in-bits='64' id='924bbc81'/>
<qualified-type-def type-id='924bbc81' const='yes' id='5499dcde'/>
<pointer-type-def type-id='5499dcde' size-in-bits='64' id='2236d41c'/>
<qualified-type-def type-id='2236d41c' restrict='yes' id='31488924'/>
<pointer-type-def type-id='a3681dea' size-in-bits='64' id='fce6d540'/>
<qualified-type-def type-id='e4957c49' const='yes' id='fced9da2'/>
<pointer-type-def type-id='fced9da2' size-in-bits='64' id='b20efd18'/>
<pointer-type-def type-id='7a842a6b' size-in-bits='64' id='07ee4a58'/>
<pointer-type-def type-id='8a70a786' size-in-bits='64' id='5507783b'/>
<pointer-type-def type-id='b1e62775' size-in-bits='64' id='f095e320'/>
<pointer-type-def type-id='519bc206' size-in-bits='64' id='ef2f159c'/>
<qualified-type-def type-id='ef2f159c' restrict='yes' id='de0eb5a4'/>
<pointer-type-def type-id='f1abb096' size-in-bits='64' id='5f147c28'/>
<qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
<pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
<function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='update_vdevs_config_dev_sysfs_path' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='22cce67b'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='posix_memalign' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='63e171df'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sysconf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='bd54fe1a'/>
</function-decl>
<function-decl name='libpc_error_description' mangled-name='libpc_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libpc_error_description'>
<parameter type-id='5507783b' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
<parameter type-id='5507783b' name='hdl'/>
<parameter type-id='07ee4a58' name='import'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
<parameter type-id='5507783b' name='hdl'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='857bb57e' name='configp'/>
<parameter type-id='07ee4a58' name='args'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_find_import_blkid' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5507783b'/>
<parameter type-id='18c91f9e'/>
<parameter type-id='fce6d540'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_open_func' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='f1abb096'>
<parameter type-id='eabacd01'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_nicenum.c' language='LANG_C99'>
<type-decl name='long double' size-in-bits='128' id='e095c704'/>
<enum-decl name='zfs_nicenum_format' id='29cf1969'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_NICENUM_1024' value='0'/>
<enumerator name='ZFS_NICENUM_BYTES' value='1'/>
<enumerator name='ZFS_NICENUM_TIME' value='2'/>
<enumerator name='ZFS_NICENUM_RAW' value='3'/>
<enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
</enum-decl>
<function-decl name='powl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e095c704'/>
<parameter type-id='e095c704'/>
<return type-id='e095c704'/>
</function-decl>
<function-decl name='floor' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a0eb0f08'/>
<return type-id='a0eb0f08'/>
</function-decl>
<function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
<parameter type-id='80f4b756' name='str'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<parameter type-id='29cf1969' name='format'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
<parameter type-id='9c313c2d' name='num'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libzutil/zutil_pool.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='853fd5dc' size-in-bits='32768' id='b505fc2f'>
<subrange length='64' type-id='7359adad' id='b10be967'/>
</array-type-def>
+ <type-decl name='float' size-in-bits='32' id='a6c45d85'/>
<class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='65242dfe'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dds_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='dds_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='dds_psize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='dds_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='dds_ref_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='dds_ref_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='dds_ref_psize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='dds_ref_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_stat_t' type-id='65242dfe' id='853fd5dc'/>
<class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='bc2b3086'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ddh_stat' type-id='b505fc2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_histogram_t' type-id='bc2b3086' id='2d7fe832'/>
<qualified-type-def type-id='2d7fe832' const='yes' id='ec92d602'/>
<pointer-type-def type-id='ec92d602' size-in-bits='64' id='932720f8'/>
<qualified-type-def type-id='853fd5dc' const='yes' id='764c298c'/>
<pointer-type-def type-id='764c298c' size-in-bits='64' id='dfe59052'/>
+ <qualified-type-def type-id='a9c79a1f' const='yes' id='cd087e36'/>
+ <pointer-type-def type-id='cd087e36' size-in-bits='64' id='e05e8614'/>
+ <function-decl name='nanosleep' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='e05e8614'/>
+ <parameter type-id='3d83ba87'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
<parameter type-id='dfe59052' name='dds_total'/>
<parameter type-id='932720f8' name='ddh'/>
<return type-id='48b5725f'/>
</function-decl>
+ <function-decl name='fsleep' mangled-name='fsleep' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fsleep'>
+ <parameter type-id='a6c45d85' name='sec'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zpool_getenv_int' mangled-name='zpool_getenv_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_getenv_int'>
+ <parameter type-id='80f4b756' name='env'/>
+ <parameter type-id='95e97e5e' name='default_val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/avl/avl.c' language='LANG_C99'>
<function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='fba6cb51' name='where'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
<parameter type-id='a3681dea' name='tree'/>
<parameter type-id='eaa32e2f' name='new_data'/>
<parameter type-id='eaa32e2f' name='here'/>
<parameter type-id='95e97e5e' name='direction'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
<parameter type-id='a3681dea' name='t'/>
<parameter type-id='eaa32e2f' name='obj'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
<parameter type-id='a3681dea' name='tree1'/>
<parameter type-id='a3681dea' name='tree2'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
<parameter type-id='a3681dea' name='tree'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/cityhash.c' language='LANG_C99'>
<function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
<parameter type-id='9c313c2d' name='w1'/>
<parameter type-id='9c313c2d' name='w2'/>
<parameter type-id='9c313c2d' name='w3'/>
<parameter type-id='9c313c2d' name='w4'/>
<return type-id='9c313c2d'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfeature_common.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='17472' id='dd432c71'>
<subrange length='39' type-id='7359adad' id='ae4a9561'/>
</array-type-def>
<enum-decl name='zfeature_flags' id='6db816a4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
<enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
<enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
<enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
</enum-decl>
<typedef-decl name='zfeature_flags_t' type-id='6db816a4' id='fc329033'/>
<enum-decl name='zfeature_type' id='c4fa2355'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
<enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
<enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
</enum-decl>
<typedef-decl name='zfeature_type_t' type-id='c4fa2355' id='732d2bb2'/>
<class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='1178d146'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fi_feature' type-id='d6618c78' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fi_uname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fi_guid' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='fi_desc' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fi_flags' type-id='fc329033' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='fi_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='fi_type' type-id='732d2bb2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='fi_depends' type-id='1acff326' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfeature_info_t' type-id='1178d146' id='83f29ca2'/>
<typedef-decl name='__free_fn_t' type-id='b7f9d8e6' id='3ff5e51e'/>
<class-decl name='dirent' size-in-bits='2240' is-struct='yes' visibility='default' id='611586a1'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='d_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='d_off' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='d_reclen' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='144'>
<var-decl name='d_type' type-id='002ac4a6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='152'>
<var-decl name='d_name' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zfs_mod_supported_features' size-in-bits='128' is-struct='yes' visibility='default' id='3eee3342'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tree' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='all_features' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<qualified-type-def type-id='d6618c78' const='yes' id='81a65028'/>
<pointer-type-def type-id='81a65028' size-in-bits='64' id='1acff326'/>
<qualified-type-def type-id='3eee3342' const='yes' id='0c1d5bbb'/>
<pointer-type-def type-id='0c1d5bbb' size-in-bits='64' id='a3372543'/>
<pointer-type-def type-id='611586a1' size-in-bits='64' id='2e243169'/>
<qualified-type-def type-id='eaa32e2f' const='yes' id='83be723c'/>
<pointer-type-def type-id='83be723c' size-in-bits='64' id='7acd98a2'/>
<var-decl name='spa_feature_table' type-id='dd432c71' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
<var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
<function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='f09217ba'/>
</function-decl>
<function-decl name='tsearch' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='63e171df'/>
<parameter type-id='aba7edd8'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='tfind' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='7acd98a2'/>
<parameter type-id='aba7edd8'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='tdestroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='3ff5e51e'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
<parameter type-id='d6618c78' name='fid'/>
<parameter type-id='d6618c78' name='check'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
<parameter type-id='80f4b756' name='scope'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='a3372543' name='sfeatures'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_comutil.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='2624' id='5ce15418'>
<subrange length='41' type-id='7359adad' id='cb834f44'/>
</array-type-def>
<qualified-type-def type-id='80f4b756' const='yes' id='b99c00c9'/>
<pointer-type-def type-id='8f92235e' size-in-bits='64' id='90421557'/>
<function-decl name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='dace003f'/>
<parameter type-id='90421557'/>
<return type-id='95e97e5e'/>
</function-decl>
<var-decl name='zfs_history_event_names' type-id='5ce15418' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
<function-decl name='strpbrk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
<parameter type-id='5ce45b60' name='nv'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
<parameter type-id='5ce45b60' name='nv'/>
<parameter type-id='80f4b756' name='type'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
<parameter type-id='95e97e5e' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
<parameter type-id='95e97e5e' name='zpl_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_deleg.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='4096' id='59e94aca'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='infinite' id='7c00e69d'>
<subrange length='infinite' id='031f2035'/>
</array-type-def>
<enum-decl name='zfs_deleg_who_type_t' naming-typedef-id='36d4bd5a' id='b5fa5816'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
<enumerator name='ZFS_DELEG_USER' value='117'/>
<enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
<enumerator name='ZFS_DELEG_GROUP' value='103'/>
<enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
<enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
<enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
<enumerator name='ZFS_DELEG_CREATE' value='99'/>
<enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
<enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
<enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
</enum-decl>
<typedef-decl name='zfs_deleg_who_type_t' type-id='b5fa5816' id='36d4bd5a'/>
<enum-decl name='zfs_deleg_note_t' naming-typedef-id='4613c173' id='729d4547'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
<enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
<enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
<enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
<enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
<enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
<enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
<enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
<enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
<enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
<enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
<enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
<enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
<enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
<enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
<enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
<enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
<enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
</enum-decl>
<typedef-decl name='zfs_deleg_note_t' type-id='729d4547' id='4613c173'/>
<class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='z_perm' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_note' type-id='4613c173' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_deleg_perm_tab_t' type-id='5aa05c1f' id='f3f851ad'/>
<qualified-type-def type-id='f3f851ad' const='yes' id='fa1870fd'/>
<var-decl name='zfs_deleg_perm_tab' type-id='7c00e69d' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
<function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
<parameter type-id='80f4b756' name='perm'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
<parameter type-id='5ce45b60' name='nvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
<parameter type-id='26a90f95' name='attr'/>
<parameter type-id='36d4bd5a' name='type'/>
<parameter type-id='a84c031d' name='inheritchr'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='512' id='c5d13f42'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<array-type-def dimensions='1' type-id='90dbb6d6' size-in-bits='2048' id='16582e69'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='8240361c' size-in-bits='1024' id='481f90b1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='7c1ab40c' size-in-bits='512' id='cbd91ec1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='6d059eaa' size-in-bits='1024' id='729b6ebb'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<enum-decl name='zio_byteorder_t' naming-typedef-id='595a65ec' id='fc861be0'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
<enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
</enum-decl>
<typedef-decl name='zio_byteorder_t' type-id='fc861be0' id='595a65ec'/>
<class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='4bf4b004'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acd_byteorder' type-id='595a65ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acd_ctx' type-id='0f7df99e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acd_zcp' type-id='c24fc2ee' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='acd_private' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_data_t' type-id='4bf4b004' id='74e39470'/>
<typedef-decl name='zio_abd_checksum_init_t' type-id='a5444274' id='029a8ebe'/>
<typedef-decl name='zio_abd_checksum_fini_t' type-id='a5444274' id='d6fd5c6c'/>
<typedef-decl name='zio_abd_checksum_iter_t' type-id='f4a1892e' id='cefa0f4a'/>
<class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='aa14691a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acf_init' type-id='0bcca125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acf_fini' type-id='bfe36153' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acf_iter' type-id='1e276399' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_func_t' type-id='3f8e8d11' id='c2eb138a'/>
<class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='28efb250'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_superscalar_t' type-id='28efb250' id='6d059eaa'/>
<class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='acd4019a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_sse_t' type-id='acd4019a' id='7c1ab40c'/>
<class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='8c208dfa'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx_t' type-id='8c208dfa' id='8240361c'/>
<class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='c6d0c382'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c5d13f42' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx512_t' type-id='c6d0c382' id='90dbb6d6'/>
<union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='1f951ade'>
<data-member access='public'>
<var-decl name='scalar' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='superscalar' type-id='729b6ebb' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sse' type-id='cbd91ec1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx' type-id='481f90b1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx512' type-id='16582e69' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='fletcher_4_ctx_t' type-id='1f951ade' id='4b675395'/>
<qualified-type-def type-id='aa14691a' const='yes' id='3f8e8d11'/>
<pointer-type-def type-id='4b675395' size-in-bits='64' id='0f7df99e'/>
<qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
<pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
<pointer-type-def type-id='74e39470' size-in-bits='64' id='eefe7427'/>
<pointer-type-def type-id='d6fd5c6c' size-in-bits='64' id='bfe36153'/>
<pointer-type-def type-id='029a8ebe' size-in-bits='64' id='0bcca125'/>
<pointer-type-def type-id='cefa0f4a' size-in-bits='64' id='1e276399'/>
<var-decl name='fletcher_4_abd_ops' type-id='c2eb138a' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
<function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
<parameter type-id='3a147f31'/>
<parameter type-id='8f92235e'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
<parameter type-id='80f4b756' name='val'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='f4a1892e'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='a5444274'>
<parameter type-id='eefe7427'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_avx512.c' language='LANG_C99'>
<typedef-decl name='fletcher_4_init_f' type-id='173aa527' id='b9ae1656'/>
<typedef-decl name='fletcher_4_fini_f' type-id='0ad5b8a8' id='c4c1f4fc'/>
<typedef-decl name='fletcher_4_compute_f' type-id='38147eff' id='ad1dc4cb'/>
<class-decl name='fletcher_4_func' size-in-bits='1024' is-struct='yes' visibility='default' id='57f479a0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='init_native' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fini_native' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='compute_native' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='init_byteswap' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fini_byteswap' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='compute_byteswap' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='valid' type-id='297d38bc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='uses_fpu' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='name' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='fletcher_4_ops_t' type-id='57f479a0' id='eba91718'/>
<qualified-type-def type-id='eba91718' const='yes' id='9eeabdc8'/>
<pointer-type-def type-id='e9e61702' size-in-bits='64' id='297d38bc'/>
<pointer-type-def type-id='fe40251b' size-in-bits='64' id='173aa527'/>
<pointer-type-def type-id='17fb1f83' size-in-bits='64' id='38147eff'/>
<pointer-type-def type-id='fb39e25e' size-in-bits='64' id='0ad5b8a8'/>
<var-decl name='fletcher_4_avx512f_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
<var-decl name='fletcher_4_avx512bw_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
<function-type size-in-bits='64' id='e9e61702'>
<return type-id='c19b74c3'/>
</function-type>
<function-type size-in-bits='64' id='fe40251b'>
<parameter type-id='0f7df99e'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='17fb1f83'>
<parameter type-id='0f7df99e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='fb39e25e'>
<parameter type-id='0f7df99e'/>
<parameter type-id='c24fc2ee'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_intel.c' language='LANG_C99'>
<var-decl name='fletcher_4_avx2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_sse.c' language='LANG_C99'>
<var-decl name='fletcher_4_sse2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
<var-decl name='fletcher_4_ssse3_ops' type-id='9eeabdc8' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_superscalar.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_fletcher_superscalar4.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar4_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_namecheck.c' language='LANG_C99'>
<var-decl name='zfs_max_dataset_nesting' type-id='95e97e5e' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
<function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_prop.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='b99c00c9' size-in-bits='768' id='bcc77e38'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<pointer-type-def type-id='3eee3342' size-in-bits='64' id='73f8e240'/>
<var-decl name='zfs_userquota_prop_prefixes' type-id='bcc77e38' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
<function-decl name='zfs_mod_list_supported' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='73f8e240'/>
</function-decl>
<function-decl name='zfs_mod_list_supported_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='73f8e240'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='31429eff'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c8bc397b'/>
<parameter type-id='a3372543'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='a3372543'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='a3372543'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c8bc397b'/>
<parameter type-id='a3372543'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='31429eff'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='a3372543'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
<parameter type-id='95e97e5e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
<parameter type-id='95e97e5e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='2e45de5d'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zprop_valid_char' mangled-name='zprop_valid_char' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_char'>
<parameter type-id='a84c031d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zpool_prop.c' language='LANG_C99'>
<function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='vdev_prop_get_table' mangled-name='vdev_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='vdev_prop_string_to_index' mangled-name='vdev_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_string_to_index'>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vdev_prop_random_value' mangled-name='vdev_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_random_value'>
<parameter type-id='5aa5c90c' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='vdev_prop_values' mangled-name='vdev_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_values'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_column_name' mangled-name='vdev_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_column_name'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='vdev_prop_align_right' mangled-name='vdev_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='vdev_prop_align_right'>
<parameter type-id='5aa5c90c' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zprop_common.c' language='LANG_C99'>
<function-decl name='__ctype_tolower_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='24f95ba5'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_import.c b/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
index 2a7c5a76a0a6..e2d40a7b3bfb 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
@@ -1,439 +1,437 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
*/
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/vdev_impl.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <libzutil.h>
#include <sys/arc_impl.h>
/*
* Check whether the named pool is currently active and matches the given
* GUID; the result is returned in *isactive.
*/
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
zpool_handle_t *zhp;
if (zpool_open_silent(hdl, name, &zhp) != 0)
return (-1);
if (zhp == NULL) {
*isactive = B_FALSE;
return (0);
}
uint64_t theguid = fnvlist_lookup_uint64(zhp->zpool_config,
ZPOOL_CONFIG_POOL_GUID);
zpool_close(zhp);
*isactive = (theguid == guid);
return (0);
}
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
nvlist_t *nvl;
zfs_cmd_t zc = {"\0"};
int err, dstbuf_size;
zcmd_write_conf_nvlist(hdl, &zc, config);
dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 32);
zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size);
while ((err = zfs_ioctl(hdl, ZFS_IOC_POOL_TRYIMPORT,
&zc)) != 0 && errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (err) {
zcmd_free_nvlists(&zc);
return (NULL);
}
if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
zcmd_free_nvlists(&zc);
return (NULL);
}
zcmd_free_nvlists(&zc);
return (nvl);
}
static nvlist_t *
refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
{
return (refresh_config((libzfs_handle_t *)handle, tryconfig));
}
static int
pool_active_libzfs(void *handle, const char *name, uint64_t guid,
boolean_t *isactive)
{
return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
}
const pool_config_ops_t libzfs_config_ops = {
.pco_refresh_config = refresh_config_libzfs,
.pco_pool_active = pool_active_libzfs,
};
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
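For illustration, a minimal standalone sketch of the offset arithmetic above; the four-label count and the 256 KiB label size are assumptions matching the ZFS on-disk format (VDEV_LABELS and sizeof (vdev_label_t)), not values defined in this diff:

/*
 * Prints the byte offset of each vdev label for a label-aligned device:
 * the first two labels sit at the front of the device, the last two at
 * the end.
 */
#include <stdio.h>
#include <stdint.h>

#define	EX_LABEL_SIZE	(256ULL * 1024)	/* assumed sizeof (vdev_label_t) */
#define	EX_LABELS	4		/* assumed VDEV_LABELS */

static uint64_t
example_label_offset(uint64_t size, int l)
{
	return (l * EX_LABEL_SIZE + (l < EX_LABELS / 2 ?
	    0 : size - EX_LABELS * EX_LABEL_SIZE));
}

int
main(void)
{
	uint64_t size = 1ULL << 30;	/* 1 GiB, label-aligned */
	for (int l = 0; l < EX_LABELS; l++)
		printf("label %d at offset %llu\n", l,
		    (unsigned long long)example_label_offset(size, l));
	return (0);
}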
/*
* Given a file descriptor, clear (zero) the label information. This function
* is used in the appliance stack as part of the ZFS sysevent module and
* to implement the "zpool labelclear" command.
*/
int
zpool_clear_label(int fd)
{
struct stat64 statbuf;
int l;
vdev_label_t *label;
uint64_t size;
boolean_t labels_cleared = B_FALSE, clear_l2arc_header = B_FALSE,
header_cleared = B_FALSE;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, l2cache;
nvlist_t *config;
if (pread64(fd, label, sizeof (vdev_label_t),
label_offset(size, l)) != sizeof (vdev_label_t)) {
continue;
}
if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0) {
continue;
}
/* Skip labels which do not have a valid guid. */
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(config);
continue;
}
/* Skip labels which are not in a known valid state. */
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(config);
continue;
}
/* If the device is a cache device, clear the header. */
if (!clear_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
clear_l2arc_header = B_TRUE;
}
}
nvlist_free(config);
/*
* A valid label was found, overwrite this label's nvlist
* and uberblocks with zeros on disk. This is done to prevent
* system utilities, like blkid, from incorrectly detecting a
* partial label. The leading pad space is left untouched.
*/
memset(label, 0, sizeof (vdev_label_t));
size_t label_size = sizeof (vdev_label_t) - (2 * VDEV_PAD_SIZE);
if (pwrite64(fd, label, label_size, label_offset(size, l) +
(2 * VDEV_PAD_SIZE)) == label_size)
labels_cleared = B_TRUE;
}
if (clear_l2arc_header) {
_Static_assert(sizeof (*label) >= sizeof (l2arc_dev_hdr_phys_t),
"label < l2arc_dev_hdr_phys_t");
memset(label, 0, sizeof (l2arc_dev_hdr_phys_t));
if (pwrite64(fd, label, sizeof (l2arc_dev_hdr_phys_t),
VDEV_LABEL_START_SIZE) == sizeof (l2arc_dev_hdr_phys_t))
header_cleared = B_TRUE;
}
free(label);
if (!labels_cleared || (clear_l2arc_header && !header_cleared))
return (-1);
return (0);
}
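A hedged usage sketch for zpool_clear_label(); the wrapper name and error handling are illustrative, not part of this change:

/*
 * Open a block device read-write and zero its ZFS labels (and, for
 * cache devices, the L2ARC header), as "zpool labelclear" does.
 */
#include <fcntl.h>
#include <unistd.h>
#include <libzfs.h>

int
clear_device_labels(const char *path)
{
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return (-1);
	int err = zpool_clear_label(fd);	/* 0 on success, -1 on failure */
	(void) close(fd);
	return (err);
}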
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
nvlist_t **child;
uint_t c, children;
if (fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID) == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_guid(child[c], guid))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct aux_cbdata {
const char *cb_type;
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} aux_cbdata_t;
static int
find_aux(zpool_handle_t *zhp, void *data)
{
aux_cbdata_t *cbp = data;
nvlist_t **list;
uint_t count;
nvlist_t *nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
&list, &count) == 0) {
for (uint_t i = 0; i < count; i++) {
uint64_t guid = fnvlist_lookup_uint64(list[i],
ZPOOL_CONFIG_GUID);
if (guid == cbp->cb_guid) {
cbp->cb_zhp = zhp;
return (1);
}
}
}
zpool_close(zhp);
return (0);
}
/*
* Determine whether the device is in use by a pool. If so, *inuse is set
* to B_TRUE, and the state and name of the pool are returned. The name
* string is allocated and must be freed by the caller.
*/
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
boolean_t *inuse)
{
nvlist_t *config;
const char *name = NULL;
boolean_t ret;
uint64_t guid = 0, vdev_guid;
zpool_handle_t *zhp;
nvlist_t *pool_config;
uint64_t stateval, isspare;
aux_cbdata_t cb = { 0 };
boolean_t isactive;
*inuse = B_FALSE;
- if (zpool_read_label(fd, &config, NULL) != 0) {
- (void) no_memory(hdl);
+ if (zpool_read_label(fd, &config, NULL) != 0)
return (-1);
- }
if (config == NULL)
return (0);
stateval = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
vdev_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID);
if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
}
switch (stateval) {
case POOL_STATE_EXPORTED:
/*
* A pool with an exported state may in fact be imported
* read-only, so check the in-core state to see if it's
* active and imported read-only. If it is, set
* its state to active.
*/
if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
(zhp = zpool_open_canfail(hdl, name)) != NULL) {
if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
stateval = POOL_STATE_ACTIVE;
/*
* All we needed the zpool handle for is the
* readonly prop check.
*/
zpool_close(zhp);
}
ret = B_TRUE;
break;
case POOL_STATE_ACTIVE:
/*
* For an active pool, we have to determine if it's really part
* of a currently active pool (in which case the pool will exist
* and the guid will be the same), or whether it's part of an
* active pool that was disconnected without being explicitly
* exported.
*/
if (pool_active(hdl, name, guid, &isactive) != 0) {
nvlist_free(config);
return (-1);
}
if (isactive) {
/*
* Because the device may have been removed while
* offlined, we only report it as active if the vdev is
* still present in the config. Otherwise, pretend like
* it's not in use.
*/
if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
(pool_config = zpool_get_config(zhp, NULL))
!= NULL) {
nvlist_t *nvroot = fnvlist_lookup_nvlist(
pool_config, ZPOOL_CONFIG_VDEV_TREE);
ret = find_guid(nvroot, vdev_guid);
} else {
ret = B_FALSE;
}
/*
* If this is an active spare within another pool, we
* treat it like an unused hot spare. This allows the
* user to create a pool with a hot spare that is currently
* in use within another pool. Since we return B_TRUE,
* libdiskmgt will continue to prevent generic consumers
* from using the device.
*/
if (ret && nvlist_lookup_uint64(config,
ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
stateval = POOL_STATE_SPARE;
if (zhp != NULL)
zpool_close(zhp);
} else {
stateval = POOL_STATE_POTENTIALLY_ACTIVE;
ret = B_TRUE;
}
break;
case POOL_STATE_SPARE:
/*
* For a hot spare, it can be either definitively in use, or
* potentially active. To determine if it's in use, we iterate
* over all pools in the system and search for one with a spare
* with a matching guid.
*
* Due to the shared nature of spares, we don't actually report
* the potentially active case as in use. This means the user
* can freely create pools on the hot spares of exported pools,
* but to do otherwise makes the resulting code complicated, and
* we end up having to deal with this case anyway.
*/
cb.cb_zhp = NULL;
cb.cb_guid = vdev_guid;
cb.cb_type = ZPOOL_CONFIG_SPARES;
if (zpool_iter(hdl, find_aux, &cb) == 1) {
name = (char *)zpool_get_name(cb.cb_zhp);
ret = B_TRUE;
} else {
ret = B_FALSE;
}
break;
case POOL_STATE_L2CACHE:
/*
* Check if any pool is currently using this l2cache device.
*/
cb.cb_zhp = NULL;
cb.cb_guid = vdev_guid;
cb.cb_type = ZPOOL_CONFIG_L2CACHE;
if (zpool_iter(hdl, find_aux, &cb) == 1) {
name = (char *)zpool_get_name(cb.cb_zhp);
ret = B_TRUE;
} else {
ret = B_FALSE;
}
break;
default:
ret = B_FALSE;
}
if (ret) {
*namestr = zfs_strdup(hdl, name);
*state = (pool_state_t)stateval;
}
if (cb.cb_zhp)
zpool_close(cb.cb_zhp);
nvlist_free(config);
*inuse = ret;
return (0);
}
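An illustrative caller for zpool_in_use(); libzfs_init()/libzfs_fini() are the standard libzfs entry points and are assumed here rather than shown in this diff:

/*
 * Report whether the device behind fd belongs to a pool. When *inuse is
 * set, namestr is allocated by zpool_in_use() and freed by the caller,
 * as the comment above requires.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libzfs.h>

void
report_in_use(int fd)
{
	libzfs_handle_t *hdl = libzfs_init();
	pool_state_t state;
	char *namestr = NULL;
	boolean_t inuse = B_FALSE;

	if (hdl == NULL)
		return;
	if (zpool_in_use(hdl, fd, &state, &namestr, &inuse) == 0 && inuse) {
		printf("in use by pool '%s' (%s)\n", namestr,
		    zpool_pool_state_to_name(state));
		free(namestr);
	}
	libzfs_fini(hdl);
}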
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
index 4ebd112f452f..2f9ccbc2ab57 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
@@ -1,5398 +1,5437 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2021, 2023, Klara Inc.
*/
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
unsigned int create:1; /* Validate property on creation */
unsigned int import:1; /* Validate property on import */
unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
return (-1);
nvlist_free(old_props);
return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
const char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
if ((value = zpool_prop_default_string(prop)) == NULL)
value = "-";
}
if (src)
*src = source;
return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t value;
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
/*
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
*/
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
== 0)) {
return (value);
}
return (zpool_prop_default_numeric(prop));
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
}
if (src)
*src = source;
return (value);
}
/*
* Map VDEV STATE to printed strings.
*/
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
else
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
default:
break;
}
return (gettext("UNKNOWN"));
}
/*
* Map POOL STATE to printed strings.
*/
const char *
zpool_pool_state_to_name(pool_state_t state)
{
switch (state) {
default:
break;
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
}
return (gettext("UNKNOWN"));
}
/*
* Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
* "SUSPENDED", etc).
*/
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
zpool_errata_t errata;
zpool_status_t status;
const char *str;
status = zpool_get_status(zhp, NULL, &errata);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
str = gettext("FAULTED");
} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
status == ZPOOL_STATUS_IO_FAILURE_MMP) {
str = gettext("SUSPENDED");
} else {
nvlist_t *nvroot = fnvlist_lookup_nvlist(
zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
uint_t vsc;
vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
}
return (str);
}
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
break;
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
case ZPOOL_PROP_COMPATIBILITY:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
zpool_get_prop_string(zhp, prop, &src),
len);
break;
}
zfs_fallthrough;
default:
(void) strlcpy(buf, "-", len);
break;
}
if (srctype != NULL)
*srctype = src;
return (0);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
len);
break;
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
case ZPOOL_PROP_MAXBLOCKSIZE:
case ZPOOL_PROP_MAXDNODESIZE:
case ZPOOL_PROP_BCLONESAVED:
case ZPOOL_PROP_BCLONEUSED:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicebytes(intval, buf, len);
}
break;
case ZPOOL_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_BCLONERATIO:
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
else
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
zfs_fallthrough;
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
!= 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
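An illustrative caller for zpool_get_prop(); the helper name is hypothetical. With literal == B_FALSE, numeric properties such as capacity are rendered human-readable (e.g. "42%"); with B_TRUE, as raw numbers:

#include <stdio.h>
#include <libzfs.h>

void
print_pool_summary(zpool_handle_t *zhp)
{
	char health[ZFS_MAXPROPLEN], cap[ZFS_MAXPROPLEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
	    sizeof (health), NULL, B_FALSE) == 0 &&
	    zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, cap,
	    sizeof (cap), NULL, B_FALSE) == 0)
		printf("%s: %s, %s full\n", zpool_get_name(zhp),
		    health, cap);
}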
/*
* Get a zpool property value for 'propname' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len, zprop_source_t *srctype)
{
nvlist_t *nv, *nvl;
uint64_t ival;
const char *value;
zprop_source_t source = ZPROP_SRC_LOCAL;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, propname, &nv) == 0) {
if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
source = ival;
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
value = "-";
}
if (srctype)
*srctype = source;
(void) strlcpy(buf, value, len);
return (0);
}
/*
* Check that the bootfs name begins with the name of the pool it is
* being set on. Assumes bootfs is a valid dataset name.
*/
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
int len = strlen(pool);
if (bootfs[0] == '\0')
return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
return (B_TRUE);
return (B_FALSE);
}
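A worked example of the prefix check above (illustrative only; bootfs_name_valid() is static, so this traces its behaviour rather than calling it):

/* For pool "tank" (len == 4):                                        */
/*   bootfs ""                   -> B_TRUE  (empty value is allowed)  */
/*   bootfs "tank"               -> B_TRUE  (bootfs[4] == '\0')       */
/*   bootfs "tank/ROOT/default"  -> B_TRUE  (bootfs[4] == '/')        */
/*   bootfs "tankers/ROOT"       -> B_FALSE (bootfs[4] is 'e')        */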
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
*/
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
const char *strval;
uint64_t intval;
const char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
if (flags.vdevprop && zpool_prop_vdev(propname)) {
vdev_prop_t vprop = vdev_name_to_prop(propname);
if (vdev_prop_readonly(vprop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
retprops, &strval, &intval, errbuf) != 0)
goto error;
continue;
} else if (flags.vdevprop && vdev_prop_user(propname)) {
if (nvlist_add_nvpair(retprops, elem) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (flags.vdevprop) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property: '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"feature '%s' unsupported by kernel"),
fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled' or 'disabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!flags.create &&
strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'disabled' at creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (prop == ZPOOL_PROP_INVAL &&
zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a string, that its name is
* shorter than ZAP_MAXNAMELEN, and its value shorter than ZFS_MAXPROPLEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strlen(strval) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value '%s' is too long"),
strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_string(retprops, propname,
strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Make sure this property is valid and applies to this type.
*/
if (prop == ZPOOL_PROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (!flags.create && zpool_prop_setonce(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform additional checking for specific properties.
*/
switch (prop) {
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid."),
propname, (unsigned long long)intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
break;
case ZPOOL_PROP_ASHIFT:
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid, "
"only values between %" PRId32 " and %"
PRId32 " are allowed."),
propname, (unsigned long long)intval,
ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
/*
* The bootfs property value has to be a dataset name, and the
* dataset has to be in the same pool it is being set on.
*/
if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
goto error;
}
zpool_close(zhp);
break;
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
break;
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*(char *)slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
*(char *)slash = '/';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*(char *)slash = '/';
break;
case ZPOOL_PROP_COMPATIBILITY:
switch (zpool_load_compat(strval, NULL, report, 1024)) {
case ZPOOL_COMPATIBILITY_OK:
case ZPOOL_COMPATIBILITY_WARNTOKEN:
break;
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
case ZPOOL_COMPATIBILITY_NOFILES:
zfs_error_aux(hdl, "%s", report);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"comment may only have printable "
"characters"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
ZPROP_MAX_COMMENT);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_READONLY:
if (!flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"requires a non-zero system hostid"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_DEDUPDITTO:
printf("Note: property '%s' no longer has "
"any effect\n", propname);
break;
default:
break;
}
}
return (retprops);
error:
nvlist_free(retprops);
return (NULL);
}
/*
* Set zpool property: propname=propval.
*/
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
char errbuf[ERRBUFLEN];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zpool_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
nvlist_free(nvl);
return (no_memory(zhp->zpool_hdl));
}
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
nvlist_free(nvl);
return (-1);
}
nvlist_free(nvl);
nvl = realprops;
/*
* Execute the corresponding ioctl() to set this property.
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
nvlist_free(nvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
else
(void) zpool_props_refresh(zhp);
return (ret);
}
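An illustrative caller for zpool_set_prop(); the wrapper is hypothetical. The value is validated through zpool_valid_proplist() before ZFS_IOC_POOL_SET_PROPS is issued, and the cached properties are refreshed on success:

#include <libzfs.h>

int
set_pool_comment(zpool_handle_t *zhp, const char *text)
{
	/* Fails with EZFS_BADPROP if text contains non-printable chars. */
	return (zpool_set_prop(zhp, "comment", text));
}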
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
zfs_type_t type, boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
nvpair_t *nvp;
zprop_list_t **last;
boolean_t firstexpand = (NULL == *plp);
int i;
if (zprop_expand_list(hdl, plp, type) != 0)
return (-1);
if (type == ZFS_TYPE_VDEV)
return (0);
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
if ((*plp)->pl_all)
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
/* Handle userprops in the all properties case */
if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
return (-1);
nvp = NULL;
while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
NULL) {
const char *propname = nvpair_name(nvp);
if (!zfs_prop_user(propname))
continue;
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_strdup(hdl, propname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (i = 0; i < SPA_FEATURES; i++) {
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
/* add any unsupported features */
for (nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
propname = zfs_asprintf(hdl, "unsupported@%s",
nvpair_name(nvp));
/*
* Before adding the property to the list make sure that no
* other pool already added the same property.
*/
found = B_FALSE;
entry = *plp;
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
found = B_TRUE;
break;
}
entry = entry->pl_next;
}
if (found) {
free(propname);
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_USERPROP;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_USERPROP &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
NULL, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
} else if (entry->pl_prop == ZPROP_INVAL &&
zfs_prop_user(entry->pl_user_prop) &&
zpool_get_userprop(zhp, entry->pl_user_prop, buf,
sizeof (buf), NULL) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
}
return (0);
}
int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
zprop_list_t **plp)
{
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
const char *strval = NULL;
int err = 0;
nvpair_t *elem = NULL;
nvlist_t *vprops = NULL;
nvlist_t *propval = NULL;
const char *propname;
vdev_prop_t prop;
zprop_list_t **last;
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed)
continue;
if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
entry->pl_user_prop, buf, sizeof (buf), NULL,
B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (entry->pl_prop == VDEV_PROP_NAME &&
strlen(vdevname) > entry->pl_width)
entry->pl_width = strlen(vdevname);
}
/* Handle the all properties case */
last = plp;
if (*last != NULL && (*last)->pl_all == B_TRUE) {
while (*last != NULL)
last = &(*last)->pl_next;
err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
if (err != 0)
return (err);
while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
propname = nvpair_name(elem);
/* Skip properties that are not user defined */
if ((prop = vdev_name_to_prop(propname)) !=
VDEV_PROP_USERPROP)
continue;
if (nvpair_value_nvlist(elem, &propval) != 0)
continue;
strval = fnvlist_lookup_string(propval, ZPROP_VALUE);
entry = zfs_alloc(zhp->zpool_hdl,
sizeof (zprop_list_t));
entry->pl_prop = prop;
entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
propname);
entry->pl_width = strlen(strval);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
return (0);
}
/*
* Get the state for the given feature on the given ZFS pool.
*/
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len)
{
uint64_t refcount;
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
boolean_t supported;
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
/*
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
* use guids.
*/
if (supported) {
int ret;
spa_feature_t fid;
ret = zfeature_lookup_name(feature, &fid);
if (ret != 0) {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
feature = spa_feature_table[fid].fi_guid;
}
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
found = B_TRUE;
if (supported) {
if (!found) {
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
} else {
if (refcount == 0)
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
else
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
}
} else {
if (found) {
if (refcount == 0) {
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
} else {
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
}
} else {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
}
return (0);
}
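An illustrative caller for zpool_prop_get_feature(); the feature name is an example, and any name known to zfeature_lookup_name() works. The result is one of the disabled/enabled/active strings handled above:

#include <stdio.h>
#include <libzfs.h>

void
print_feature_state(zpool_handle_t *zhp)
{
	char buf[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
	    sizeof (buf)) == 0)
		printf("async_destroy: %s\n", buf);	/* e.g. "enabled" */
}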
/*
* Validate the given pool name, optionally reporting an extended error
* message through the handle via zfs_error_aux().
*/
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
char what;
int ret;
ret = pool_namecheck(pool, &why, &what);
/*
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
*/
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "draid", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
if (ret != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is too long"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NO_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"permission set is missing '@'"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (B_FALSE);
}
return (B_TRUE);
}
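Illustrative only: with isopen == B_FALSE (the create/import path) the expanded reserved-name set is rejected, so e.g. "mirror", "raidz1", "draid2", "spare1", and "log" all fail, while isopen == B_TRUE still accepts such names for pools that already exist:

#include <libzfs.h>

boolean_t
can_create_pool_named(libzfs_handle_t *hdl, const char *name)
{
	/* B_FALSE: apply the stricter create/import rules. */
	return (zpool_name_valid(hdl, B_FALSE, name));
}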
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
boolean_t missing;
/*
* Make sure the pool name is valid.
*/
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
pool);
return (NULL);
}
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (NULL);
}
if (missing) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
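/*
* Illustrative usage sketch (the pool name "tank" is an assumption):
*
* libzfs_handle_t *hdl = libzfs_init();
* zpool_handle_t *zhp = zpool_open_canfail(hdl, "tank");
* if (zhp != NULL) {
*         ... inspect the pool, even if it is FAULTED ...
*         zpool_close(zhp);
* }
* libzfs_fini(hdl);
*/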
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
zpool_handle_t *zhp;
boolean_t missing;
zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (-1);
}
if (missing) {
zpool_close(zhp);
*ret = NULL;
return (0);
}
*ret = zhp;
return (0);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
return (NULL);
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
zpool_close(zpool_handle_t *zhp)
{
nvlist_free(zhp->zpool_config);
nvlist_free(zhp->zpool_old_config);
nvlist_free(zhp->zpool_props);
free(zhp);
}
/*
* Return the name of the pool.
*/
const char *
zpool_get_name(zpool_handle_t *zhp)
{
return (zhp->zpool_name);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
*/
int
zpool_get_state(zpool_handle_t *zhp)
{
return (zhp->zpool_state);
}
/*
* Check if vdev list contains a special vdev
*/
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
for (uint_t c = 0; c < children; c++) {
const char *bias;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Check if vdev list contains a dRAID vdev
*/
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (uint_t c = 0; c < children; c++) {
const char *type;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type) == 0 &&
strcmp(type, VDEV_TYPE_DRAID) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Output a dRAID top-level vdev name into the provided buffer.
*/
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
uint64_t spares, uint64_t children)
{
snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
(u_longlong_t)children, (u_longlong_t)spares);
return (name);
}
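/*
* For example (values chosen arbitrarily), data=8, parity=2, spares=1 and
* children=11 format as "draid2:8d:11c:1s".
*/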
/*
* Return B_TRUE if the provided name is a dRAID spare name.
*/
boolean_t
zpool_is_draid_spare(const char *name)
{
uint64_t spare_id, parity, vdev_id;
if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
(u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
(u_longlong_t *)&spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
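/*
* Per the sscanf() format above, a distributed spare name is of the form
* "draid<parity>-<vdev id>-<spare id>", so for example:
*
* zpool_is_draid_spare("draid1-2-3");  returns B_TRUE
* zpool_is_draid_spare("mirror-0");    returns B_FALSE
*/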
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[ERRBUFLEN];
int ret = -1;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, errbuf)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
const char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
goto create_failed;
}
if (nvlist_exists(zc_fsprops,
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
!zpool_has_special_vdev(nvroot)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s property requires a special vdev"),
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto create_failed;
}
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
&wkeydata, &wkeylen) != 0) {
zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
goto create_failed;
}
if (wkeydata != NULL) {
if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
goto create_failed;
if (nvlist_add_uint8_array(hidden_args, "wkeydata",
wkeydata, wkeylen) != 0)
goto create_failed;
if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
hidden_args) != 0)
goto create_failed;
}
}
if (zc_props)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label. This can also happen if the device is
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ERANGE:
/*
* This happens if the record size is outside the allowed
* size range, or not a power of 2.
*
* NOTE: although zfs_valid_proplist is called earlier,
* this case may have slipped through since the pool does
* not exist yet, so properties such as the maximum
* blocksize cannot be read from it.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
return (zfs_error(hdl, EZFS_BADDEV, errbuf));
} else {
return (zpool_standard_error(hdl, errno,
errbuf));
}
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
create_failed:
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
}
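/*
* Minimal usage sketch; the vdev tree in 'nvroot' is assumed to have been
* built and validated by the caller (e.g. via make_root_vdev() in
* zpool(8)):
*
* nvlist_t *props = fnvlist_alloc();
* fnvlist_add_string(props,
*     zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND), "on");
* int err = zpool_create(hdl, "tank", nvroot, props, NULL);
* fnvlist_free(props);
*/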
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
if (zfp)
zfs_close(zfp);
return (-1);
}
if (zfp) {
remove_mountpoint(zfp);
zfs_close(zfp);
}
return (0);
}
/*
* Create a checkpoint in the given pool.
*/
int
zpool_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot checkpoint '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
/*
* Discard the checkpoint from the given pool.
*/
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
int error;
error = lzc_pool_checkpoint_discard(zhp->zpool_name);
if (error != 0) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot discard checkpoint in '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, errbuf);
return (-1);
}
return (0);
}
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
zfs_cmd_t zc = {"\0"};
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char errbuf[ERRBUFLEN];
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; a pool with removing/"
"removed vdevs does not support adding "
"raidz or dRAID vdevs"));
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
"size (%s)"), buf);
}
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
ret = -1;
} else {
ret = 0;
}
zcmd_free_nvlists(&zc);
return (ret);
}
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
}
}
return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
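/*
* Usage sketch: try a normal export first and fall back to a hard-forced
* export. The log strings are free-form history annotations and only
* examples here.
*
* if (zpool_export(zhp, B_FALSE, "export tank") != 0)
*         (void) zpool_export_force(zhp, "export tank (forced)");
*/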
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *config)
{
nvlist_t *nv = NULL;
uint64_t rewindto;
int64_t loss = -1;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr || config == NULL)
return;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
return;
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
return;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
if (dryrun) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
name, timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
name, timestr);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
((longlong_t)loss + 30) / 60);
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(longlong_t)loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
}
}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
goto no_info;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
}
}
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
* zpool_import() is a contracted interface. Should be kept the same
* if possible.
*
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
char *altroot)
{
nvlist_t *props = NULL;
int ret;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
nvlist_free(props);
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
}
ret = zpool_import_props(hdl, config, newname, props,
ZFS_IMPORT_NORMAL);
nvlist_free(props);
return (ret);
}
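/*
* Usage sketch: import a discovered pool under an alternate root. The
* 'config' nvlist is assumed to come from a zpool_find_import()-style
* lookup.
*
* int err = zpool_import(hdl, config, NULL, "/mnt");
*/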
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
int indent)
{
nvlist_t **child;
uint_t c, children;
char *vname;
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
&is_log);
if (name != NULL)
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname);
}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
const char *desc = fnvpair_value_string(nvp);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
}
}
/*
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported with a different name.
*/
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
const char *thename;
const char *origname;
int ret;
int error = 0;
char errbuf[ERRBUFLEN];
origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
thename = newname;
} else {
thename = origname;
}
if (props != NULL) {
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
zcmd_write_src_nvlist(hdl, &zc, props);
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
zcmd_write_conf_nvlist(hdl, &zc, config);
zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (ret != 0)
error = errno;
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zcmd_free_nvlists(&zc);
zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
char aux[256];
/*
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
return (-1);
}
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename);
else
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
origname, thename);
switch (error) {
case ENOTSUP:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
}
}
/*
* Unsupported version.
*/
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
break;
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
if (mmp_state == MMP_STATE_ACTIVE) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool is imp"
"orted on host '%s' (hostid=%lx).\n"
"Export the pool on the other "
"system, then run 'zpool import'."),
hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool has "
"the multihost property on and "
"the\nsystem's hostid is not set. "
"Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
}
(void) zfs_error_aux(hdl, "%s", aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
break;
case EROFS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENXIO:
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing or "
"corrupted, use '-m' to import the pool "
"anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
(void) zpool_standard_error(hdl, error, desc);
break;
case EEXIST:
(void) zpool_standard_error(hdl, error, desc);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices are already in use\n"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENAMETOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new name of at least one dataset is longer than "
"the maximum allowable length"));
(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
break;
default:
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
break;
}
nvlist_free(nv);
ret = -1;
} else {
zpool_handle_t *zhp;
/*
* This should never fail, but play it safe anyway.
*/
if (zpool_open_silent(hdl, thename, &zhp) != 0)
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
}
return (ret);
}
/*
* Translate vdev names to guids. If a vdev_path is determined to be
* unsuitable then a vd_errlist is allocated and the vdev path and errno
* are added to it.
*/
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
nvlist_t *errlist = NULL;
int error = 0;
for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
elem = nvlist_next_nvpair(vds, elem)) {
boolean_t spare, cache;
const char *vd_path = nvpair_name(elem);
nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
NULL);
if ((tgt == NULL) || cache || spare) {
if (errlist == NULL) {
errlist = fnvlist_alloc();
error = EINVAL;
}
uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
(spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
fnvlist_add_int64(errlist, vd_path, err);
continue;
}
uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
fnvlist_add_uint64(vdev_guids, vd_path, guid);
char msg[MAXNAMELEN];
(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
fnvlist_add_string(guids_to_paths, msg, vd_path);
}
if (error != 0) {
verify(errlist != NULL);
if (vd_errlist != NULL)
*vd_errlist = errlist;
else
fnvlist_free(errlist);
}
return (error);
}
static int
xlate_init_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_INITIALIZING);
case ESRCH:
return (EZFS_NO_INITIALIZE);
}
return (err);
}
/*
* Begin, suspend, cancel, or uninit (clear) the initialization (the
* writing of a known pattern to all free blocks) for the given vdevs in
* the given pool.
*/
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{
int err;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *vd_errlist = NULL;
nvlist_t *errlist;
nvpair_t *elem;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &vd_errlist);
if (err != 0) {
verify(vd_errlist != NULL);
goto list_errors;
}
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err != 0) {
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
goto list_errors;
}
if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"uninitialize is not supported by kernel"));
}
(void) zpool_standard_error(zhp->zpool_hdl, err,
dgettext(TEXT_DOMAIN, "operation failed"));
goto out;
}
if (wait) {
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_INITIALIZE, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting for '%s' to initialize"),
nvpair_name(elem));
goto out;
}
}
}
goto out;
list_errors:
for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
elem = nvlist_next_nvpair(vd_errlist, elem)) {
int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
const char *path;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot initialize '%s'", path);
}
out:
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
if (vd_errlist != NULL)
fnvlist_free(vd_errlist);
return (err == 0 ? 0 : -1);
}
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
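/*
* Usage sketch: start initializing one vdev and wait for completion. Only
* the nvpair names in 'vds' matter, so boolean placeholders are used; the
* "/dev/sda1" path is an assumption.
*
* nvlist_t *vds = fnvlist_alloc();
* fnvlist_add_boolean(vds, "/dev/sda1");
* int err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
* fnvlist_free(vds);
*/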
static int
xlate_trim_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_TRIMMING);
case ESRCH:
return (EZFS_NO_TRIM);
case EOPNOTSUPP:
return (EZFS_TRIM_NOTSUP);
}
return (err);
}
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
int err;
nvpair_t *elem;
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_TRIM, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting to trim '%s'"), nvpair_name(elem));
return (err);
}
}
return (0);
}
/*
* Check errlist and report any errors, omitting ones which should be
* suppressed. Returns B_TRUE if any errors were reported.
*/
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
nvpair_t *elem;
boolean_t reported_errs = B_FALSE;
int num_vds = 0;
int num_suppressed_errs = 0;
for (elem = nvlist_next_nvpair(vds, NULL);
elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
num_vds++;
}
for (elem = nvlist_next_nvpair(errlist, NULL);
elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
const char *path;
/*
* If only the pool was specified, and it was not a secure
* trim then suppress warnings for individual vdevs which
* do not support trimming.
*/
if (vd_error == EZFS_TRIM_NOTSUP &&
trim_flags->fullpool &&
!trim_flags->secure) {
num_suppressed_errs++;
continue;
}
reported_errs = B_TRUE;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot trim '%s'", path);
}
if (num_suppressed_errs == num_vds) {
(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"no devices in pool support trim operations"));
(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
dgettext(TEXT_DOMAIN, "cannot trim")));
reported_errs = B_TRUE;
}
return (reported_errs);
}
/*
* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
* the given vdevs in the given pool.
*/
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
trimflags_t *trim_flags)
{
int err;
int retval = 0;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *errlist = NULL;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &errlist);
if (err != 0) {
check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
retval = -1;
goto out;
}
err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
trim_flags->secure, vdev_guids, &errlist);
if (err != 0) {
nvlist_t *vd_errlist;
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
if (check_trim_errs(zhp, trim_flags, guids_to_paths,
vds, vd_errlist)) {
retval = -1;
goto out;
}
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "operation failed"));
zpool_standard_error(zhp->zpool_hdl, err, errbuf);
retval = -1;
goto out;
}
}
if (trim_flags->wait)
retval = zpool_trim_wait(zhp, vdev_guids);
out:
if (errlist != NULL)
fnvlist_free(errlist);
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
return (retval);
}
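/*
* Usage sketch: trim the vdevs named in 'vds' (built as in the initialize
* example above) and wait for completion; the flag values are chosen for
* the example.
*
* trimflags_t flags = { .rate = 0, .secure = B_FALSE,
*     .fullpool = B_TRUE, .wait = B_TRUE };
* int err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
*/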
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
char errbuf[ERRBUFLEN];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *args = fnvlist_alloc();
fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
fnvlist_free(args);
if (err == 0) {
return (0);
} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name,
sizeof (zc.zc_name));
zc.zc_cookie = func;
zc.zc_flags = cmd;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
return (0);
}
/*
* An ECANCELED on a scrub means one of the following:
* 1. we resumed a paused scrub.
* 2. we resumed a paused error scrub.
* 3. an error scrub was not run because there is no error log.
*/
if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
return (0);
/*
* ENOENT is returned here when we tried to pause a scrub or error
* scrub while none was in progress; treat it as success.
*/
if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
POOL_SCRUB_PAUSE) {
return (0);
}
ASSERT3U(func, >=, POOL_SCAN_NONE);
ASSERT3U(func, <, POOL_SCAN_FUNCS);
if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
zhp->zpool_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot scrub %s"),
zhp->zpool_name);
}
} else if (func == POOL_SCAN_RESILVER) {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot restart resilver on %s"), zhp->zpool_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot cancel scrubbing %s"), zhp->zpool_name);
} else {
assert(!"unexpected result");
}
/*
* With EBUSY, six cases are possible:
*
* Current state Requested
* 1. Normal Scrub Running Normal Scrub or Error Scrub
* 2. Normal Scrub Paused Error Scrub
* 3. Normal Scrub Paused Pause Normal Scrub
* 4. Error Scrub Running Normal Scrub or Error Scrub
* 5. Error Scrub Paused Pause Error Scrub
* 6. Resilvering Anything else
*/
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
ps->pss_state == DSS_SCANNING) {
if (ps->pss_pass_scrub_pause == 0) {
/* handles case 1 */
assert(cmd == POOL_SCRUB_NORMAL);
return (zfs_error(hdl, EZFS_SCRUBBING,
errbuf));
} else {
if (func == POOL_SCAN_ERRORSCRUB) {
/* handles case 2 */
ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
return (zfs_error(hdl,
EZFS_SCRUB_PAUSED_TO_CANCEL,
errbuf));
} else {
/* handles case 3 */
ASSERT3U(func, ==, POOL_SCAN_SCRUB);
ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
return (zfs_error(hdl,
EZFS_SCRUB_PAUSED, errbuf));
}
}
} else if (ps &&
ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
if (ps->pss_pass_error_scrub_pause == 0) {
/* handles case 4 */
ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
errbuf));
} else {
/* handles case 5 */
ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
errbuf));
}
} else {
/* handles case 6 */
return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
} else {
return (zpool_standard_error(hdl, err, errbuf));
}
}
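/*
* Usage sketch: kick off a normal scrub, and later pause it:
*
* (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
* ...
* (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
*/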
/*
* Find a vdev that matches the search criteria specified. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare; but FALSE if it's an INUSE spare.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
const char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
return (NULL);
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval = fnvpair_value_uint64(pair);
uint64_t theguid = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_GUID);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
const char *srchval, *val;
srchval = fnvpair_value_string(pair);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
/*
* Search for the requested value. Special cases:
*
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "-part1", or "p1". The suffix is hidden from the user,
* but included in the string, so this matches around it.
* - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
* is used to check all possible expanded paths.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
*
* Otherwise, all other searches are simple string compares.
*/
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
/*
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (i.e. mirror-4).
*/
if ((type = strdup(srchval)) == NULL)
return (NULL);
if ((p = strrchr(type, '-')) == NULL) {
free(type);
break;
}
idx = p + 1;
*p = '\0';
/*
* If the types don't match then keep looking.
*/
if (strncmp(val, type, strlen(val)) != 0) {
free(type);
break;
}
verify(zpool_vdev_is_interior(type));
id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
/*
* If we are looking for a raidz and a parity is
* specified, make sure it matches.
*/
int rzlen = strlen(VDEV_TYPE_RAIDZ);
assert(rzlen == strlen(VDEV_TYPE_DRAID));
int typlen = strlen(type);
if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
typlen != rzlen) {
uint64_t vdev_parity;
int parity = *(type + rzlen) - '0';
if (parity <= 0 || parity > 3 ||
(typlen - rzlen) != 1) {
/*
* Nonsense parity specified, can
* never match
*/
free(type);
return (NULL);
}
vdev_parity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
if ((int)vdev_parity != parity) {
free(type);
break;
}
}
free(type);
if (errno != 0)
return (NULL);
/*
* Now verify that we have the correct vdev id.
*/
if (vdev_id == id)
return (nv);
}
/*
* Common case
*/
if (strcmp(srchval, val) == 0)
return (nv);
break;
}
default:
break;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always lookup the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
*/
if (log != NULL &&
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
is_log) {
*log = B_TRUE;
}
return (ret);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*l2cache = B_TRUE;
return (ret);
}
}
}
return (NULL);
}
/*
* Given a physical path or guid, find the associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
uint64_t guid;
char *end;
search = fnvlist_alloc();
guid = strtoull(ppath, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
/*
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
*/
static boolean_t
zpool_vdev_is_interior(const char *name)
{
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
!zpool_is_draid_spare(name))
return (B_TRUE);
return (B_FALSE);
}
+/*
+ * Lookup the nvlist for a given vdev.
+ */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
+ boolean_t __avail_spare, __l2cache, __log;
search = fnvlist_alloc();
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
} else if (zpool_vdev_is_interior(path)) {
fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
} else {
fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
}
nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE);
+ /*
+ * User can pass NULL for avail_spare, l2cache, and log, but
+ * we still need to provide variables to vdev_to_nvlist_iter(), so
+ * just point them to junk variables here.
+ */
+ if (!avail_spare)
+ avail_spare = &__avail_spare;
+ if (!l2cache)
+ l2cache = &__l2cache;
+ if (!log)
+ log = &__log;
+
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
fnvlist_free(search);
return (ret);
}
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
* If is_spare, is_l2cache, or is_log is non-NULL, then store within it
* whether the VDEV is a spare, l2cache, or log device. If they're NULL
* then
* ignore them.
*/
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
&log)) == NULL)
return (0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
*is_l2cache = l2cache;
if (is_log != NULL)
*is_log = log;
return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
}
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
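/*
* Usage sketch (the path is an assumption; 0 means no matching vdev):
*
* uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");
* if (guid == 0)
*         ... device is not part of this pool ...
*/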
/*
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
*/
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
#ifndef __FreeBSD__
const char *pathname;
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
/*
* XXX - L2ARC 1.0 devices can't support expansion.
*/
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
int error;
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
errbuf));
fullpath = buf;
}
error = zpool_relabel_disk(hdl, fullpath, errbuf);
if (error != 0)
return (error);
}
}
#endif
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
}
return (zpool_standard_error(hdl, errno, errbuf));
}
*newstate = zc.zc_cookie;
return (0);
}
/*
* Take the specified vdev offline
*/
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
case EEXIST:
/*
* The log device has unplayed logs
*/
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
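/*
* Usage sketch: temporarily offline a disk, then bring it back online
* (the path is an assumption):
*
* vdev_state_t newstate;
* if (zpool_vdev_offline(zhp, "/dev/sda1", B_TRUE) == 0)
*         (void) zpool_vdev_online(zhp, "/dev/sda1", 0, &newstate);
*/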
/*
* Remove the specified vdev asynchronously from the configuration, so
* that it may come ONLINE if reinserted. This is called from zed on a
* udev remove event.
* Note: We also have a similar function zpool_vdev_remove() that
* removes the vdev from the pool.
*/
int
zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = VDEV_STATE_REMOVED;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Mark the given vdev faulted.
*/
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
default:
return (zpool_standard_error(hdl, errno, errbuf));
}
}
/*
- * Mark the given vdev degraded.
+ * Generic set vdev state function
*/
-int
-zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+static int
+zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
+ vdev_state_t state)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
- dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
+ dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
+ zpool_state_to_name(state, aux), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
- zc.zc_cookie = VDEV_STATE_DEGRADED;
+ zc.zc_cookie = state;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
+/*
+ * Mark the given vdev degraded.
+ */
+int
+zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+ return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
+}
+
+/*
+ * Mark the given vdev as in a removed state (as if the device does not exist).
+ *
+ * This is different than zpool_vdev_remove() which does a removal of a device
+ * from the pool (but the device does exist).
+ */
+int
+zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+ return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
+}
+
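+/*
+ * Usage sketch for the helpers above (illustrative only): mark a vdev
+ * degraded, or mark it removed as zed does on a device-removal event.
+ * The guid would typically come from zpool_vdev_path_to_guid();
+ * VDEV_AUX_NONE means no additional reason is recorded.
+ *
+ * (void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_NONE);
+ * (void) zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
+ */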
/*
* Returns TRUE if the given nvlist is a vdev that was originally swapped in as
* a hot spare.
*/
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
const char *type = fnvlist_lookup_string(search,
ZPOOL_CONFIG_TYPE);
if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
children == 2 && child[which] == tgt)
return (B_TRUE);
for (c = 0; c < children; c++)
if (is_replacing_spare(child[c], tgt, which))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (replacing)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
zc.zc_cookie = replacing;
zc.zc_simple = rebuild;
if (rebuild &&
zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"the loaded zfs module doesn't support device rebuilds"));
return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
}
config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
NULL) == NULL || !avail_spare) &&
is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
}
free(newname);
zcmd_write_conf_nvlist(hdl, &zc, nvroot);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
if (ret == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing) {
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
} else if (rebuild) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"only mirror and dRAID vdevs support "
"sequential reconstruction"));
} else if (zpool_is_draid_spare(new_disk)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares can only replace child "
"devices in their parent's dRAID vdev"));
} else if (version >= SPA_VERSION_MULTI_REPLACE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
}
} else {
char status[64] = {0};
zpool_prop_get_feature(zhp,
"feature@device_rebuild", status, 63);
if (rebuild &&
strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device_rebuild feature must be enabled "
"in order to use sequential "
"reconstruction"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
}
}
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
"or device removal is in progress"),
new_disk);
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case EDOM:
/*
* The new device has a different optimal sector size.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
/*
* Detach the specified device.
*/
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
/*
* Find a mirror vdev in the source nvlist.
*
* The mchild array contains a list of disks in one of the top-level mirrors
* of the source pool. The schild array contains a list of disks that the
* user specified on the command line. We loop over the mchild array to
* see if any entry in the schild array matches.
*
* If a disk in the mchild array is found in the schild array, we return
* the index of that entry. Otherwise we return -1.
*/
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
nvlist_t **schild, uint_t schildren)
{
uint_t mc;
for (mc = 0; mc < mchildren; mc++) {
uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0);
free(spath);
if (result) {
free(mpath);
return (mc);
}
}
free(mpath);
}
return (-1);
}
/*
* Split a mirror pool. If '*newroot' is NULL, a new nvlist is generated
* and it is the caller's responsibility to free it.
*/
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
const char *bias;
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
"retrieve pool configuration\n"));
return (-1);
}
tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
props, vers, flags, errbuf)) == NULL)
return (-1);
(void) nvlist_lookup_uint64(zc_props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property %s can only be set at import time"),
zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
}
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool is missing vdev tree"));
nvlist_free(zc_props);
return (-1);
}
varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
vcount = 0;
if (*newroot == NULL ||
nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
&newchild, &newchildren) != 0)
newchildren = 0;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
const char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
/*
* Unlike cache & spares, slogs are stored in the
* ZPOOL_CONFIG_CHILDREN array. We filter them out here.
*/
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_log || is_hole) {
/*
* Create a hole vdev and put it in the config.
*/
if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0)
goto out;
if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
1) != 0)
goto out;
if (lastlog == 0)
lastlog = vcount;
varray[vcount++] = vdev;
continue;
}
lastlog = 0;
type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
vdev = child[c];
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
continue;
} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
is_special = B_TRUE;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
is_dedup = B_TRUE;
}
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
/* find or add an entry for this top-level vdev */
if (newchildren > 0 &&
(entry = find_vdev_entry(zhp, mchild, mchildren,
newchild, newchildren)) >= 0) {
/* We found a disk that the user specified. */
vdev = mchild[entry];
++found;
} else {
/* User didn't specify a disk for this vdev. */
vdev = mchild[mchildren - 1];
}
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
if (flags.dryrun != 0) {
if (is_dedup == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) != 0)
goto out;
} else if (is_special == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) != 0)
goto out;
}
}
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
goto out;
}
/* Prepare the nvlist for populating. */
if (*newroot == NULL) {
if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
goto out;
freelist = B_TRUE;
if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0)
goto out;
} else {
verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
}
/* Add all the children we found */
if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
* If we're just doing a dry run, exit now with success.
*/
if (flags.dryrun) {
memory_err = B_FALSE;
freelist = B_FALSE;
goto out;
}
/* now build up the config list & call the ioctl */
if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_nvlist(newconfig,
ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
nvlist_add_string(newconfig,
ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
goto out;
/*
* The new pool is automatically part of the namespace unless we
* explicitly export it.
*/
if (!flags.import)
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
zcmd_write_conf_nvlist(hdl, &zc, newconfig);
if (zc_props != NULL)
zcmd_write_src_nvlist(hdl, &zc, zc_props);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
retval = zpool_standard_error(hdl, errno, errbuf);
goto out;
}
freelist = B_FALSE;
memory_err = B_FALSE;
out:
if (varray != NULL) {
int v;
for (v = 0; v < vcount; v++)
nvlist_free(varray[v]);
free(varray);
}
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(newconfig);
if (freelist) {
nvlist_free(*newroot);
*newroot = NULL;
}
if (retval != 0)
return (retval);
if (memory_err)
return (no_memory(hdl));
return (0);
}
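/*
* Illustrative sketch: a dry-run split, letting the library choose the
* last disk of each mirror and return the proposed layout in 'newroot'
* ("tank2" is a hypothetical new pool name):
*
*    nvlist_t *newroot = NULL;
*    splitflags_t flags = { 0 };
*    flags.dryrun = 1;
*    if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0) {
*        nvlist_print(stdout, newroot);
*        nvlist_free(newroot);
*    }
*/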
/*
* Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
if (zpool_is_draid_spare(path)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares cannot be removed"));
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support log removal"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
}
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
switch (errno) {
case EALREADY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"removal for this vdev is already in progress."));
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; all top-level vdevs must "
"have the same sector size and not be raidz."));
(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
break;
case EBUSY:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Pool busy; removal may already be in progress"));
}
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
case EACCES:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
} else {
(void) zpool_standard_error(hdl, errno, errbuf);
}
break;
default:
(void) zpool_standard_error(hdl, errno, errbuf);
}
return (-1);
}
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot cancel removal"));
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = 1;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
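/*
* Illustrative sketch: starting a top-level vdev removal and backing
* out of it ("sdc" is hypothetical and 'user_changed_mind' stands in
* for caller logic):
*
*    if (zpool_vdev_remove(zhp, "sdc") != 0)
*        (void) fprintf(stderr, "%s\n",
*            libzfs_error_description(g_zfs));
*    else if (user_changed_mind)
*        (void) zpool_vdev_remove_cancel(zhp);
*/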
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
uint64_t *sizep)
{
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
path);
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
if (avail_spare || l2cache || islog) {
*sizep = 0;
return (0);
}
if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"indirect size not available"));
return (zfs_error(hdl, EINVAL, errbuf));
}
return (0);
}
/*
* Clear the errors for the pool, or the particular device if specified.
*/
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
nvlist_t *tgt;
zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
&l2cache, NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
}
zpool_get_load_policy(rewindnvl, &policy);
zc.zc_cookie = policy.zlp_rewind;
zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
zcmd_write_src_nvlist(hdl, &zc, rewindnvl);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
errno == ENOMEM)
zcmd_expand_dst_nvlist(hdl, &zc);
if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
zcmd_free_nvlists(&zc);
return (0);
}
zcmd_free_nvlists(&zc);
return (zpool_standard_error(hdl, errno, errbuf));
}
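/*
* Illustrative sketch: clearing all errors for a pool with an explicit
* no-rewind policy, much as zpool(8)'s clear command does:
*
*    nvlist_t *policy = fnvlist_alloc();
*    fnvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
*        ZPOOL_NO_REWIND);
*    if (zpool_clear(zhp, NULL, policy) != 0)
*        (void) fprintf(stderr, "%s\n",
*            libzfs_error_description(g_zfs));
*    nvlist_free(policy);
*/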
/*
* Similar to zpool_clear(), but takes a GUID (used by fmd).
*/
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Change the GUID for a pool.
*/
int
zpool_reguid(zpool_handle_t *zhp)
{
char errbuf[ERRBUFLEN];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, errbuf));
}
/*
* Reopen the pool.
*/
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
int ret;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *force = data;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_boolean_value(innvl, "force", *force);
if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
nvlist_free(innvl);
return (zpool_standard_error_fmt(hdl, ret,
dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
}
nvlist_free(innvl);
return (0);
}
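/*
* Illustrative sketch: zpool_sync_one() is shaped as a zpool_iter()
* callback, so forcing a txg sync on every imported pool looks like:
*
*    boolean_t force = B_TRUE;
*    (void) zpool_iter(g_zfs, zpool_sync_one, &force);
*/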
#define PATH_BUF_LEN 64
/*
* Given a vdev, return the name to display in iostat. If the vdev has a path,
* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
* We also check if this is a whole disk, in which case we strip off the
* trailing 's0' slice name.
*
* This routine is also responsible for identifying when disks have been
* reconfigured in a new location. The kernel will have opened the device by
* devid, but the path will still refer to the old location. To catch this, we
* first do a path -> devid translation (which is fast for the common case). If
* the devid matches, we're done. If not, we do a reverse devid -> path
* translation and issue the appropriate ioctl() to update the path of the vdev.
* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
* of these checks.
*/
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
const char *type, *tpath;
const char *path;
uint64_t value;
char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN * 2];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
name_flags |= VDEV_NAME_PATH;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
name_flags |= VDEV_NAME_GUID;
if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
path = tpath;
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/*
* For a block device only use the name.
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = zfs_strip_path(path);
}
/*
* Remove the partition from the path if this is a whole disk.
*/
if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
} else {
path = type;
/*
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
/*
* If it's a dRAID device, we add parity, groups, and spares.
*/
if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata, nparity, nspares;
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
nparity = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY);
ndata = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NDATA);
nspares = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NSPARES);
path = zpool_draid_name(buf, sizeof (buf), ndata,
nparity, nspares, children);
}
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id = fnvlist_lookup_uint64(nv,
ZPOOL_CONFIG_ID);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
}
}
return (zfs_strdup(hdl, path));
}
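/*
* For reference, some (hypothetical) names this produces: "sda" for a
* disk vdev, "raidz2-1" or "mirror-0" for top-level vdevs when
* VDEV_NAME_TYPE_ID is set, "draid2:8d:12c:2s-0" for a dRAID top-level
* vdev, and a bare GUID such as "9203738694003676165" when
* VDEV_NAME_GUID is in effect.
*/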
static int
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
*/
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
zbookmark_phys_t *buf;
uint64_t buflen = 10000; /* approx. 320 KB of RAM */
if (fnvlist_lookup_uint64(zhp->zpool_config,
ZPOOL_CONFIG_ERRCOUNT) == 0)
return (0);
/*
* Retrieve the raw error list from the kernel. If it doesn't fit,
* allocate a larger buffer and retry.
*/
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
buf = zfs_alloc(zhp->zpool_hdl,
buflen * sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst = (uintptr_t)buf;
zc.zc_nvlist_dst_size = buflen;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
free(buf);
if (errno == ENOMEM) {
buflen *= 2;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
"errors unavailable")));
}
} else {
break;
}
}
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
uint64_t zblen = buflen - zc.zc_nvlist_dst_size;
qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in the nverrlistp with nvlist's of dataset and object numbers.
*/
for (uint64_t i = 0; i < zblen; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
zb[i-1].zb_object == zb[i].zb_object)
continue;
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
goto nomem;
if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
zb[i].zb_objset) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
zb[i].zb_object) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
nvlist_free(nv);
goto nomem;
}
nvlist_free(nv);
}
free(buf);
return (0);
nomem:
free(buf);
return (no_memory(zhp->zpool_hdl));
}
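/*
* Illustrative sketch: walking the uniquified list returned above
* (each element is keyed with the constant "ejk" used in this file):
*
*    nvlist_t *nverrlist = NULL;
*    if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
*        nvpair_t *elem = NULL;
*        while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
*            nvlist_t *nv = fnvpair_value_nvlist(elem);
*            (void) printf("%llu:%llu\n",
*                (u_longlong_t)fnvlist_lookup_uint64(nv,
*                ZPOOL_ERR_DATASET),
*                (u_longlong_t)fnvlist_lookup_uint64(nv,
*                ZPOOL_ERR_OBJECT));
*        }
*        nvlist_free(nverrlist);
*    }
*/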
/*
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
zc.zc_cookie = new_version;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
int i;
(void) strlcpy(string, zfs_basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
zcmd_write_src_nvlist(hdl, &zc, args);
int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
}
/*
* Perform ioctl to get some command history of a pool.
*
* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
* logical offset of the history buffer to start reading from.
*
* Upon return, 'off' is the next logical offset to read from and
* 'len' is the actual number of bytes read into 'buf'.
*/
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)buf;
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
dgettext(TEXT_DOMAIN,
"cannot show history for pool '%s'"),
zhp->zpool_name));
case ENOENT:
return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s'"), zhp->zpool_name));
case ENOTSUP:
return (zfs_error_fmt(hdl, EZFS_BADVERSION,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s', pool must be upgraded"), zhp->zpool_name));
default:
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name));
}
}
*len = zc.zc_history_len;
*off = zc.zc_history_offset;
return (0);
}
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
boolean_t *eof)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char *buf;
int buflen = 128 * 1024;
nvlist_t **records = NULL;
uint_t numrecords = 0;
int err = 0, i;
uint64_t start = *off;
buf = zfs_alloc(hdl, buflen);
/* process about 1 MiB at a time */
while (*off - start < 1024 * 1024) {
uint64_t bytes_read = buflen;
uint64_t leftover;
if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
if (!bytes_read) {
*eof = B_TRUE;
break;
}
if ((err = zpool_history_unpack(buf, bytes_read,
&leftover, &records, &numrecords)) != 0) {
zpool_standard_error_fmt(hdl, err,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name);
break;
}
*off -= leftover;
if (leftover == bytes_read) {
/*
* No progress was made because the buffer is too small to
* hold this record; resize it and retry.
*/
buflen *= 2;
free(buf);
buf = zfs_alloc(hdl, buflen);
}
}
free(buf);
if (!err) {
*nvhisp = fnvlist_alloc();
fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
(const nvlist_t **)records, numrecords);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
free(records);
return (err);
}
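/*
* Illustrative sketch: reading the entire history, roughly 1 MiB per
* call, until EOF is reported (the ZPOOL_HIST_RECORD array in 'nvhis'
* would be consumed where indicated):
*
*    nvlist_t *nvhis;
*    uint64_t off = 0;
*    boolean_t eof = B_FALSE;
*    while (!eof) {
*        if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
*            break;
*        process_records(nvhis);    (hypothetical consumer hook)
*        nvlist_free(nvhis);
*    }
*/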
/*
* Retrieve the next event given the passed 'zevent_fd' file descriptor.
* If there is a new event available 'nvp' will contain a newly allocated
* nvlist and 'dropped' will be set to the number of missed events since
* the last call to this function. When 'nvp' is set to NULL it indicates
* no new events are available. In either case the function returns 0 and
* it is up to the caller to free 'nvp'. In the case of a fatal error the
* function will return a non-zero value. When the function is called in
* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
* it will not return until a new event is available.
*/
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
int *dropped, unsigned flags, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
*nvp = NULL;
*dropped = 0;
zc.zc_cleanup_fd = zevent_fd;
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
switch (errno) {
case ESHUTDOWN:
error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "zfs shutdown"));
goto out;
case ENOENT:
/* Blocking error case should not occur */
if (!(flags & ZEVENT_NONBLOCK))
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
case ENOMEM:
zcmd_expand_dst_nvlist(hdl, &zc);
goto retry;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
}
}
error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
if (error != 0)
goto out;
*dropped = (int)zc.zc_cookie;
out:
zcmd_free_nvlists(&zc);
return (error);
}
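/*
* Illustrative sketch: draining all pending events without blocking.
* ZFS_DEV is "/dev/zfs" and each consumer needs its own descriptor:
*
*    int zevent_fd = open(ZFS_DEV, O_RDWR);
*    nvlist_t *nvl;
*    int dropped;
*    while (zevent_fd >= 0 && zpool_events_next(g_zfs, &nvl, &dropped,
*        ZEVENT_NONBLOCK, zevent_fd) == 0 && nvl != NULL) {
*        dump_nvlist(nvl, 0);
*        nvlist_free(nvl);
*    }
*    if (zevent_fd >= 0)
*        (void) close(zevent_fd);
*/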
/*
* Clear all events.
*/
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
return (zpool_standard_error(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot clear events")));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (0);
}
/*
* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file handle. On success zero is returned,
* otherwise -1 is returned and hdl->libzfs_error is set to the errno.
*/
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
zc.zc_guid = eid;
zc.zc_cleanup_fd = zevent_fd;
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
switch (errno) {
case ENOENT:
error = zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
case ENOMEM:
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
}
}
return (error);
}
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len, boolean_t always_unmounted)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
char *mntpnt = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
if (zfs_ioctl(zhp->zpool_hdl,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(longlong_t)dsobj, (longlong_t)obj);
return;
}
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
&mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
zc.zc_value);
} else {
(void) snprintf(pathname, len, "%s:%s",
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
/*
* Wait while the specified activity is in progress in the pool.
*/
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
boolean_t missing;
int error = zpool_wait_status(zhp, activity, &missing, NULL);
if (missing) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
return (ENOENT);
} else {
return (error);
}
}
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent pools are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zpool's wait cmd). In that
* scenario, a pool being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the pool doesn't
* exist.
*/
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait(zhp->zpool_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
}
return (error);
}
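/*
* Illustrative sketch: polling an activity until it completes or the
* pool disappears, as zpool(8)'s wait command does:
*
*    for (;;) {
*        boolean_t missing, waited;
*        if (zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB,
*            &missing, &waited) != 0 || missing || !waited)
*            break;
*    }
*/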
int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
int error = lzc_set_bootenv(zhp->zpool_name, envmap);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error setting bootenv in pool '%s'"), zhp->zpool_name);
}
return (error);
}
int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
nvlist_t *nvl;
int error;
nvl = NULL;
error = lzc_get_bootenv(zhp->zpool_name, &nvl);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error getting bootenv in pool '%s'"), zhp->zpool_name);
} else {
*nvlp = nvl;
}
return (error);
}
/*
* Attempt to read and parse feature file(s) (from "compatibility" property).
* Files contain zpool feature names, comma or whitespace-separated.
* Comments (# character to next newline) are discarded.
*
* Arguments:
* compatibility : string containing feature filenames
* features : either NULL or pointer to array of boolean
* report : either NULL or pointer to string buffer
* rlen : length of "report" buffer
*
* compatibility is NULL (unset), "", "off", "legacy", or list of
* comma-separated filenames. Filenames should be either absolute,
* or relative to:
* 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
* 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
* (Unset), "" or "off" => enable all features
* "legacy" => disable all features
*
* Any feature names read from the files which match feature names
* (fi_uname) in spa_feature_table will have the corresponding boolean
* set in the features array (if non-NULL). If more than one feature
* set is specified, only features present in *all* of them will be set.
*
* "report" if not NULL will be populated with a suitable status message.
*
* Return values:
* ZPOOL_COMPATIBILITY_OK : files read and parsed ok
* ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
* ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
* ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
* ZPOOL_COMPATIBILITY_NOFILES : no feature files found
*/
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
size_t rlen)
{
int sdirfd, ddirfd, featfd;
struct stat fs;
char *fc;
char *ps, *ls, *ws;
char *file, *line, *word;
char l_compat[ZFS_MAXPROPLEN];
boolean_t ret_nofiles = B_TRUE;
boolean_t ret_badfile = B_FALSE;
boolean_t ret_badtoken = B_FALSE;
boolean_t ret_warntoken = B_FALSE;
/* special cases (unset), "" and "off" => enable all features */
if (compat == NULL || compat[0] == '\0' ||
strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
if (report != NULL)
strlcpy(report, gettext("all features enabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/* Final special case "legacy" => disable all features */
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_FALSE;
if (report != NULL)
strlcpy(report, gettext("all features disabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/*
* Start with all true; will be ANDed with results from each file
*/
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
char err_badfile[ZFS_MAXPROPLEN] = "";
char err_badtoken[ZFS_MAXPROPLEN] = "";
/*
* We ignore errors from the directory open()
* as they're only needed if the filename is relative
* which will be checked during the openat().
*/
/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif
sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
for (file = strtok_r(l_compat, ",", &ps);
file != NULL;
file = strtok_r(NULL, ",", &ps)) {
boolean_t l_features[SPA_FEATURES];
enum { Z_SYSCONF, Z_DATA } source;
/* try sysconfdir first, then datadir */
source = Z_SYSCONF;
if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
source = Z_DATA;
}
/* File readable and correct size? */
if (featfd < 0 ||
fstat(featfd, &fs) < 0 ||
fs.st_size < 1 ||
fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
(void) close(featfd);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif
/* private mmap() so we can strtok safely */
fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
ZC_MMAP_FLAGS, featfd, 0);
(void) close(featfd);
/* map ok, and last character == newline? */
if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
(void) munmap((void *) fc, fs.st_size);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
ret_nofiles = B_FALSE;
for (uint_t i = 0; i < SPA_FEATURES; i++)
l_features[i] = B_FALSE;
/* replace the final newline with a NUL to terminate the string */
fc[fs.st_size - 1] = '\0';
for (line = strtok_r(fc, "\n", &ls);
line != NULL;
line = strtok_r(NULL, "\n", &ls)) {
/* discard comments */
char *r = strchr(line, '#');
if (r != NULL)
*r = '\0';
for (word = strtok_r(line, ", \t", &ws);
word != NULL;
word = strtok_r(NULL, ", \t", &ws)) {
/* Find matching feature name */
uint_t f;
for (f = 0; f < SPA_FEATURES; f++) {
zfeature_info_t *fi =
&spa_feature_table[f];
if (strcmp(word, fi->fi_uname) == 0) {
l_features[f] = B_TRUE;
break;
}
}
if (f < SPA_FEATURES)
continue;
/* found an unrecognized word */
/* lightly sanitize it */
if (strlen(word) > 32)
word[32] = '\0';
for (char *c = word; *c != '\0'; c++)
if (!isprint(*c))
*c = '?';
strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
if (source == Z_SYSCONF)
ret_badtoken = B_TRUE;
else
ret_warntoken = B_TRUE;
}
}
(void) munmap((void *) fc, fs.st_size);
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] &= l_features[i];
}
(void) close(sdirfd);
(void) close(ddirfd);
/* Return the most serious error */
if (ret_badfile) {
if (report != NULL)
snprintf(report, rlen, gettext("could not read/"
"parse feature file(s): %s"), err_badfile);
return (ZPOOL_COMPATIBILITY_BADFILE);
}
if (ret_nofiles) {
if (report != NULL)
strlcpy(report,
gettext("no valid compatibility files specified"),
rlen);
return (ZPOOL_COMPATIBILITY_NOFILES);
}
if (ret_badtoken) {
if (report != NULL)
snprintf(report, rlen, gettext("invalid feature "
"name(s) in local compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_BADTOKEN);
}
if (ret_warntoken) {
if (report != NULL)
snprintf(report, rlen, gettext("unrecognized feature "
"name(s) in distribution compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_WARNTOKEN);
}
if (report != NULL)
strlcpy(report, gettext("compatibility set ok"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
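/*
* Illustrative sketch of the format parsed above; a hypothetical
* /etc/zfs/compatibility.d/example file might contain:
*
*    # features assumed safe for our oldest supported release
*    async_destroy, empty_bpobj
*    lz4_compress
*    spacemap_histogram    # trailing comments are discarded too
*/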
static int
zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
{
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
verify(zhp != NULL);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
}
if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
NULL)) == NULL) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "can not find %s in %s"),
vdevname, zhp->zpool_name);
return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
}
*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
return (0);
}
/*
* Get a vdev property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
nvlist_t *nv;
const char *strval;
uint64_t intval;
zprop_source_t src = ZPROP_SRC_NONE;
if (prop == VDEV_PROP_USERPROP) {
/* user property, prop_name must contain the property name */
assert(prop_name != NULL);
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
/* user prop not found */
return (-1);
}
(void) strlcpy(buf, strval, len);
if (srctype)
*srctype = src;
return (0);
}
if (prop_name == NULL)
prop_name = (char *)vdev_prop_to_name(prop);
switch (vdev_prop_get_type(prop)) {
case PROP_TYPE_STRING:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
if ((strval = vdev_prop_default_string(prop)) == NULL)
strval = "-";
}
(void) strlcpy(buf, strval, len);
break;
case PROP_TYPE_NUMBER:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
switch (prop) {
case VDEV_PROP_ASIZE:
case VDEV_PROP_PSIZE:
case VDEV_PROP_SIZE:
case VDEV_PROP_BOOTSIZE:
case VDEV_PROP_ALLOCATED:
case VDEV_PROP_FREE:
case VDEV_PROP_READ_ERRORS:
case VDEV_PROP_WRITE_ERRORS:
case VDEV_PROP_CHECKSUM_ERRORS:
case VDEV_PROP_INITIALIZE_ERRORS:
case VDEV_PROP_OPS_NULL:
case VDEV_PROP_OPS_READ:
case VDEV_PROP_OPS_WRITE:
case VDEV_PROP_OPS_FREE:
case VDEV_PROP_OPS_CLAIM:
case VDEV_PROP_OPS_TRIM:
case VDEV_PROP_BYTES_NULL:
case VDEV_PROP_BYTES_READ:
case VDEV_PROP_BYTES_WRITE:
case VDEV_PROP_BYTES_FREE:
case VDEV_PROP_BYTES_CLAIM:
case VDEV_PROP_BYTES_TRIM:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_EXPANDSZ:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicenum(intval, buf, len);
}
break;
case VDEV_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case VDEV_PROP_STATE:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) strlcpy(buf, zpool_state_to_name(intval,
VDEV_AUX_NONE), len);
}
break;
default:
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
} else {
src = ZPROP_SRC_DEFAULT;
intval = vdev_prop_default_numeric(prop);
}
if (vdev_prop_index_to_string(prop, intval,
(const char **)&strval) != 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
/*
* Get a vdev property value for 'prop_name' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
boolean_t literal)
{
nvlist_t *reqnvl, *reqprops;
nvlist_t *retprops = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
if (prop != VDEV_PROP_USERPROP) {
/* prop_name overrides prop value */
if (prop_name != NULL)
prop = vdev_name_to_prop(prop_name);
else
prop_name = (char *)vdev_prop_to_name(prop);
assert(prop < VDEV_NUM_PROPS);
}
assert(prop_name != NULL);
if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
nvlist_free(reqnvl);
nvlist_free(reqprops);
return (no_memory(zhp->zpool_hdl));
}
fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
if (ret == 0) {
ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
len, srctype, literal);
} else {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
" %s in %s"), prop_name, vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
}
nvlist_free(reqnvl);
nvlist_free(reqprops);
nvlist_free(retprops);
return (ret);
}
/*
* Get all vdev properties
*/
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
nvlist_t **outnvl)
{
nvlist_t *nvl = NULL;
uint64_t vdev_guid = 0;
int ret;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
nvlist_free(nvl);
if (ret) {
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
" %s in %s"), vdevname, zhp->zpool_name);
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
}
return (ret);
}
/*
* Set vdev property
*/
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
const char *propname, const char *propval)
{
int ret;
nvlist_t *nvl = NULL;
nvlist_t *outnvl = NULL;
nvlist_t *props;
nvlist_t *realprops;
prop_flags_t flags = { 0 };
uint64_t version;
uint64_t vdev_guid;
if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
return (ret);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
if (nvlist_add_string(props, propname, propval) != 0) {
nvlist_free(props);
return (no_memory(zhp->zpool_hdl));
}
char errbuf[ERRBUFLEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
propname, vdevname, zhp->zpool_name);
flags.vdevprop = 1;
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
nvlist_free(props);
nvlist_free(nvl);
return (-1);
}
nvlist_free(props);
props = realprops;
fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
nvlist_free(props);
nvlist_free(nvl);
nvlist_free(outnvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
return (ret);
}
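/*
* Illustrative sketch: reading then setting a vdev property from a
* libzfs consumer ("sda" and the value "20" are hypothetical):
*
*    char value[ZFS_MAXPROPLEN];
*    if (zpool_get_vdev_prop(zhp, "sda", VDEV_PROP_CHECKSUM_N, NULL,
*        value, sizeof (value), NULL, B_FALSE) == 0)
*        (void) printf("checksum_n=%s\n", value);
*    if (zpool_set_vdev_prop(zhp, "sda", "checksum_n", "20") != 0)
*        (void) fprintf(stderr, "%s\n",
*            libzfs_error_description(g_zfs));
*/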
diff --git a/sys/contrib/openzfs/lib/libzutil/os/freebsd/zutil_import_os.c b/sys/contrib/openzfs/lib/libzutil/os/freebsd/zutil_import_os.c
index 19ba58e79a03..049710d3985f 100644
--- a/sys/contrib/openzfs/lib/libzutil/os/freebsd/zutil_import_os.c
+++ b/sys/contrib/openzfs/lib/libzutil/os/freebsd/zutil_import_os.c
@@ -1,256 +1,273 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright 2016 Nexenta Systems, Inc.
*/
/*
* Pool import support functions.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <aio.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>
#include <libgeom.h>
#include <sys/vdev_impl.h>
#include <libzutil.h>
#include "zutil_import.h"
/*
* Update a leaf vdev's persistent device strings
*
* - only applies for a dedicated leaf vdev (aka whole disk)
* - updated during pool create|add|attach|import
* - used for device matching during auto-{online,expand,replace}
* - stored in a leaf disk config label (i.e. alongside 'path' NVP)
* - these strings are currently not used in kernel (i.e. for vdev_disk_open)
*
* On FreeBSD we currently just strip devid and phys_path to avoid confusion.
*/
void
update_vdev_config_dev_strs(nvlist_t *nv)
{
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
}
/*
* Do not even look at these devices.
*/
static const char * const excluded_devs[] = {
"nfslock",
"sequencer",
"zfs",
};
#define EXCLUDED_DIR "/dev/"
#define EXCLUDED_DIR_LEN 5
void
zpool_open_func(void *arg)
{
rdsk_node_t *rn = arg;
struct stat64 statbuf;
nvlist_t *config;
size_t i;
int num_labels;
int fd;
off_t mediasize = 0;
/*
* Do not even look at excluded devices.
*/
if (strncmp(rn->rn_name, EXCLUDED_DIR, EXCLUDED_DIR_LEN) == 0) {
char *name = rn->rn_name + EXCLUDED_DIR_LEN;
for (i = 0; i < nitems(excluded_devs); ++i) {
const char *excluded_name = excluded_devs[i];
size_t len = strlen(excluded_name);
if (strncmp(name, excluded_name, len) == 0) {
return;
}
}
}
/*
* O_NONBLOCK so we don't hang trying to open things like serial ports.
*/
if ((fd = open(rn->rn_name, O_RDONLY|O_NONBLOCK|O_CLOEXEC)) < 0)
return;
/*
* Ignore failed stats.
*/
if (fstat64(fd, &statbuf) != 0)
goto out;
/*
* We only want regular files, character devs and block devs.
*/
if (S_ISREG(statbuf.st_mode)) {
/* Check if this file is too small to hold a zpool. */
if (statbuf.st_size < SPA_MINDEVSIZE) {
goto out;
}
} else if (S_ISCHR(statbuf.st_mode) || S_ISBLK(statbuf.st_mode)) {
/* Check if this device is too small to hold a zpool. */
if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) != 0 ||
mediasize < SPA_MINDEVSIZE) {
goto out;
}
} else {
goto out;
}
if (zpool_read_label(fd, &config, &num_labels) != 0)
goto out;
if (num_labels == 0) {
nvlist_free(config);
goto out;
}
rn->rn_config = config;
rn->rn_num_labels = num_labels;
/* TODO: Reuse labelpaths logic from Linux? */
out:
(void) close(fd);
}
static const char * const
zpool_default_import_path[] = {
"/dev"
};
const char * const *
zpool_default_search_paths(size_t *count)
{
*count = nitems(zpool_default_import_path);
return (zpool_default_import_path);
}
int
zpool_find_import_blkid(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache)
{
const char *oid = "vfs.zfs.vol.recursive";
char *end, path[MAXPATHLEN];
rdsk_node_t *slice;
struct gmesh mesh;
struct gclass *mp;
struct ggeom *gp;
struct gprovider *pp;
avl_index_t where;
int error, value;
size_t pathleft, size = sizeof (value);
boolean_t skip_zvols = B_FALSE;
end = stpcpy(path, "/dev/");
pathleft = &path[sizeof (path)] - end;
error = geom_gettree(&mesh);
if (error != 0)
return (error);
if (sysctlbyname(oid, &value, &size, NULL, 0) == 0 && value == 0)
skip_zvols = B_TRUE;
*slice_cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(*slice_cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
if (skip_zvols && strcmp(mp->lg_name, "ZFS::ZVOL") == 0)
continue;
LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
strlcpy(end, pp->lg_name, pathleft);
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
slice->rn_name = zutil_strdup(hdl, path);
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = *slice_cache;
slice->rn_hdl = hdl;
slice->rn_labelpaths = B_FALSE;
slice->rn_order = IMPORT_ORDER_DEFAULT;
pthread_mutex_lock(lock);
if (avl_find(*slice_cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(*slice_cache, slice, where);
}
pthread_mutex_unlock(lock);
}
}
}
geom_deletetree(&mesh);
return (0);
}
int
zfs_dev_flush(int fd)
{
(void) fd;
return (0);
}
+void
+update_vdev_config_dev_sysfs_path(nvlist_t *nv, const char *path,
+ const char *key)
+{
+ (void) nv;
+ (void) path;
+ (void) key;
+}
+
void
update_vdevs_config_dev_sysfs_path(nvlist_t *config)
{
(void) config;
}
+
+int
+zpool_disk_wait(const char *path)
+{
+
+ (void) path;
+ return (ENOTSUP);
+}
diff --git a/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_import_os.c b/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_import_os.c
index 44ed697dd490..bb91dec5acff 100644
--- a/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_import_os.c
+++ b/sys/contrib/openzfs/lib/libzutil/os/linux/zutil_import_os.c
@@ -1,898 +1,923 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
*/
/*
* Pool import support functions.
*
* Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
* these commands are expected to run in the global zone, we can assume
* that the devices are all readable when called.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>
#include <libzfs.h>
#include "zutil_import.h"
#ifdef HAVE_LIBUDEV
#include <libudev.h>
#include <sched.h>
#endif
#include <blkid/blkid.h>
#define DEV_BYID_PATH "/dev/disk/by-id/"
/*
* Skip devices with well known prefixes:
* there can be side effects when opening devices which need to be avoided.
*
* hpet - High Precision Event Timer
* watchdog[N] - Watchdog must be closed in a special way.
*/
static boolean_t
should_skip_dev(const char *dev)
{
return ((strcmp(dev, "watchdog") == 0) ||
(strncmp(dev, "watchdog", 8) == 0 && isdigit(dev[8])) ||
(strcmp(dev, "hpet") == 0));
}
int
zfs_dev_flush(int fd)
{
return (ioctl(fd, BLKFLSBUF));
}
void
zpool_open_func(void *arg)
{
rdsk_node_t *rn = arg;
libpc_handle_t *hdl = rn->rn_hdl;
struct stat64 statbuf;
nvlist_t *config;
uint64_t vdev_guid = 0;
int error;
int num_labels = 0;
int fd;
if (should_skip_dev(zfs_basename(rn->rn_name)))
return;
/*
* Ignore failed stats. We only want regular files and block devices.
* Ignore files that are too small to hold a zpool.
*/
if (stat64(rn->rn_name, &statbuf) != 0 ||
(!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode)) ||
(S_ISREG(statbuf.st_mode) && statbuf.st_size < SPA_MINDEVSIZE))
return;
/*
* Preferentially open using O_DIRECT to bypass the block device
* cache which may be stale for multipath devices. An EINVAL errno
* indicates O_DIRECT is unsupported so fallback to just O_RDONLY.
*/
fd = open(rn->rn_name, O_RDONLY | O_DIRECT | O_CLOEXEC);
if ((fd < 0) && (errno == EINVAL))
fd = open(rn->rn_name, O_RDONLY | O_CLOEXEC);
if ((fd < 0) && (errno == EACCES))
hdl->lpc_open_access_error = B_TRUE;
if (fd < 0)
return;
error = zpool_read_label(fd, &config, &num_labels);
if (error != 0) {
(void) close(fd);
return;
}
if (num_labels == 0) {
(void) close(fd);
nvlist_free(config);
return;
}
/*
* Check that the vdev is for the expected guid. Additional entries
* are speculatively added based on the paths stored in the labels.
* Entries with valid paths but incorrect guids must be removed.
*/
error = nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
if (error || (rn->rn_vdev_guid && rn->rn_vdev_guid != vdev_guid)) {
(void) close(fd);
nvlist_free(config);
return;
}
(void) close(fd);
rn->rn_config = config;
rn->rn_num_labels = num_labels;
/*
* Add additional entries for paths described by this label.
*/
if (rn->rn_labelpaths) {
const char *path = NULL;
const char *devid = NULL;
- const char *env = NULL;
rdsk_node_t *slice;
avl_index_t where;
- int timeout;
int error;
if (label_paths(rn->rn_hdl, rn->rn_config, &path, &devid))
return;
- env = getenv("ZPOOL_IMPORT_UDEV_TIMEOUT_MS");
- if ((env == NULL) || sscanf(env, "%d", &timeout) != 1 ||
- timeout < 0) {
- timeout = DISK_LABEL_WAIT;
- }
-
/*
* Allow devlinks to stabilize so all paths are available.
*/
- zpool_label_disk_wait(rn->rn_name, timeout);
+ zpool_disk_wait(rn->rn_name);
if (path != NULL) {
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
slice->rn_name = zutil_strdup(hdl, path);
slice->rn_vdev_guid = vdev_guid;
slice->rn_avl = rn->rn_avl;
slice->rn_hdl = hdl;
slice->rn_order = IMPORT_ORDER_PREFERRED_1;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(rn->rn_lock);
if (avl_find(rn->rn_avl, slice, &where)) {
pthread_mutex_unlock(rn->rn_lock);
free(slice->rn_name);
free(slice);
} else {
avl_insert(rn->rn_avl, slice, where);
pthread_mutex_unlock(rn->rn_lock);
zpool_open_func(slice);
}
}
if (devid != NULL) {
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
error = asprintf(&slice->rn_name, "%s%s",
DEV_BYID_PATH, devid);
if (error == -1) {
free(slice);
return;
}
slice->rn_vdev_guid = vdev_guid;
slice->rn_avl = rn->rn_avl;
slice->rn_hdl = hdl;
slice->rn_order = IMPORT_ORDER_PREFERRED_2;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(rn->rn_lock);
if (avl_find(rn->rn_avl, slice, &where)) {
pthread_mutex_unlock(rn->rn_lock);
free(slice->rn_name);
free(slice);
} else {
avl_insert(rn->rn_avl, slice, where);
pthread_mutex_unlock(rn->rn_lock);
zpool_open_func(slice);
}
}
}
}
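/*
 * Illustrative sketch (not part of this change): the O_DIRECT open
 * pattern used by zpool_open_func() above, isolated for clarity.
 */
#if 0
static int
example_open_uncached(const char *path)
{
	/* Bypass the block device cache when the kernel supports it */
	int fd = open(path, O_RDONLY | O_DIRECT | O_CLOEXEC);

	/* EINVAL means O_DIRECT is unsupported; retry with a cached open */
	if (fd < 0 && errno == EINVAL)
		fd = open(path, O_RDONLY | O_CLOEXEC);

	return (fd);
}
#endif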
static const char * const
zpool_default_import_path[] = {
"/dev/disk/by-vdev", /* Custom rules, use first if they exist */
"/dev/mapper", /* Use multipath devices before components */
"/dev/disk/by-partlabel", /* Single unique entry set by user */
"/dev/disk/by-partuuid", /* Generated partition uuid */
"/dev/disk/by-label", /* Custom persistent labels */
"/dev/disk/by-uuid", /* Single unique entry and persistent */
"/dev/disk/by-id", /* May be multiple entries and persistent */
"/dev/disk/by-path", /* Encodes physical location and persistent */
"/dev" /* UNSAFE device names will change */
};
const char * const *
zpool_default_search_paths(size_t *count)
{
*count = ARRAY_SIZE(zpool_default_import_path);
return (zpool_default_import_path);
}
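/*
 * Illustrative sketch (not part of this change): consuming the default
 * search path table through the accessor above.
 */
#if 0
static void
example_print_search_paths(void)
{
	size_t count;
	const char * const *paths = zpool_default_search_paths(&count);

	for (size_t i = 0; i < count; i++)
		(void) printf("%zu: %s\n", i, paths[i]);
}
#endif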
/*
 * Given a full path to a device, determine whether that device appears in the
 * import search path. If it does, return the first match and store the
 * index in the passed 'order' variable; otherwise return an error.
*/
static int
zfs_path_order(const char *name, int *order)
{
const char *env = getenv("ZPOOL_IMPORT_PATH");
if (env) {
for (int i = 0; ; ++i) {
env += strspn(env, ":");
size_t dirlen = strcspn(env, ":");
if (dirlen) {
if (strncmp(name, env, dirlen) == 0) {
*order = i;
return (0);
}
env += dirlen;
} else
break;
}
} else {
for (int i = 0; i < ARRAY_SIZE(zpool_default_import_path);
++i) {
if (strncmp(name, zpool_default_import_path[i],
strlen(zpool_default_import_path[i])) == 0) {
*order = i;
return (0);
}
}
}
return (ENOENT);
}
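/*
 * Illustrative sketch (not part of this change): resolving a device's
 * search-path order with zfs_path_order() above. With ZPOOL_IMPORT_PATH
 * unset, a name under "/dev/mapper" maps to index 1 (the second entry of
 * zpool_default_import_path); with ZPOOL_IMPORT_PATH set, the index is
 * the position within that colon-separated list. The path below is
 * hypothetical.
 */
#if 0
static void
example_path_order(void)
{
	int order = -1;

	if (zfs_path_order("/dev/mapper/mpatha", &order) == 0)
		(void) printf("search-path index: %d\n", order);
	else
		(void) printf("not on the import search path\n");
}
#endif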
/*
* Use libblkid to quickly enumerate all known zfs devices.
*/
int
zpool_find_import_blkid(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache)
{
rdsk_node_t *slice;
blkid_cache cache;
blkid_dev_iterate iter;
blkid_dev dev;
avl_index_t where;
int error;
*slice_cache = NULL;
error = blkid_get_cache(&cache, NULL);
if (error != 0)
return (error);
error = blkid_probe_all_new(cache);
if (error != 0) {
blkid_put_cache(cache);
return (error);
}
iter = blkid_dev_iterate_begin(cache);
if (iter == NULL) {
blkid_put_cache(cache);
return (EINVAL);
}
	/*
	 * blkid_dev_set_search() takes const char *s only since
	 * util-linux 2.32, hence the casts.
	 */
error = blkid_dev_set_search(iter,
(char *)"TYPE", (char *)"zfs_member");
if (error != 0) {
blkid_dev_iterate_end(iter);
blkid_put_cache(cache);
return (error);
}
*slice_cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(*slice_cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
while (blkid_dev_next(iter, &dev) == 0) {
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
slice->rn_name = zutil_strdup(hdl, blkid_dev_devname(dev));
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = *slice_cache;
slice->rn_hdl = hdl;
slice->rn_labelpaths = B_TRUE;
error = zfs_path_order(slice->rn_name, &slice->rn_order);
if (error == 0)
slice->rn_order += IMPORT_ORDER_SCAN_OFFSET;
else
slice->rn_order = IMPORT_ORDER_DEFAULT;
pthread_mutex_lock(lock);
if (avl_find(*slice_cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(*slice_cache, slice, where);
}
pthread_mutex_unlock(lock);
}
blkid_dev_iterate_end(iter);
blkid_put_cache(cache);
return (0);
}
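/*
 * Illustrative sketch (not part of this change): a minimal libblkid
 * walk over TYPE=zfs_member devices, mirroring the enumeration done by
 * zpool_find_import_blkid() above.
 */
#if 0
static void
example_list_zfs_members(void)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;

	if (blkid_get_cache(&cache, NULL) != 0)
		return;
	(void) blkid_probe_all_new(cache);
	iter = blkid_dev_iterate_begin(cache);
	if (iter != NULL) {
		(void) blkid_dev_set_search(iter,
		    (char *)"TYPE", (char *)"zfs_member");
		while (blkid_dev_next(iter, &dev) == 0)
			(void) printf("%s\n", blkid_dev_devname(dev));
		blkid_dev_iterate_end(iter);
	}
	blkid_put_cache(cache);
}
#endif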
/*
* Linux persistent device strings for vdev labels
*
* based on libudev for consistency with libudev disk add/remove events
*/
typedef struct vdev_dev_strs {
char vds_devid[128];
char vds_devphys[128];
} vdev_dev_strs_t;
#ifdef HAVE_LIBUDEV
/*
* Obtain the persistent device id string (describes what)
*
* used by ZED vdev matching for auto-{online,expand,replace}
*/
int
zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
{
struct udev_list_entry *entry;
const char *bus;
char devbyid[MAXPATHLEN];
	/* The bus-based by-id path is preferred */
bus = udev_device_get_property_value(dev, "ID_BUS");
if (bus == NULL) {
const char *dm_uuid;
/*
* For multipath nodes use the persistent uuid based identifier
*
* Example: /dev/disk/by-id/dm-uuid-mpath-35000c5006304de3f
*/
dm_uuid = udev_device_get_property_value(dev, "DM_UUID");
if (dm_uuid != NULL) {
(void) snprintf(bufptr, buflen, "dm-uuid-%s", dm_uuid);
return (0);
}
/*
* For volumes use the persistent /dev/zvol/dataset identifier
*/
entry = udev_device_get_devlinks_list_entry(dev);
while (entry != NULL) {
const char *name;
name = udev_list_entry_get_name(entry);
if (strncmp(name, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) {
(void) strlcpy(bufptr, name, buflen);
return (0);
}
entry = udev_list_entry_get_next(entry);
}
/*
	 * NVMe 'by-id' symlinks are similar to the bus case
*/
struct udev_device *parent;
parent = udev_device_get_parent_with_subsystem_devtype(dev,
"nvme", NULL);
if (parent != NULL)
bus = "nvme"; /* continue with bus symlink search */
else
return (ENODATA);
}
/*
* locate the bus specific by-id link
*/
(void) snprintf(devbyid, sizeof (devbyid), "%s%s-", DEV_BYID_PATH, bus);
entry = udev_device_get_devlinks_list_entry(dev);
while (entry != NULL) {
const char *name;
name = udev_list_entry_get_name(entry);
if (strncmp(name, devbyid, strlen(devbyid)) == 0) {
name += strlen(DEV_BYID_PATH);
(void) strlcpy(bufptr, name, buflen);
return (0);
}
entry = udev_list_entry_get_next(entry);
}
return (ENODATA);
}
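/*
 * Illustrative sketch (not part of this change): resolving a devid for
 * a block device via zfs_device_get_devid() above. The sysname "sda"
 * is hypothetical.
 */
#if 0
static void
example_get_devid(void)
{
	char devid[128];
	struct udev *udev = udev_new();
	struct udev_device *dev;

	if (udev == NULL)
		return;
	dev = udev_device_new_from_subsystem_sysname(udev, "block", "sda");
	if (dev != NULL) {
		if (zfs_device_get_devid(dev, devid, sizeof (devid)) == 0)
			(void) printf("devid: %s\n", devid);
		udev_device_unref(dev);
	}
	udev_unref(udev);
}
#endif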
/*
* Obtain the persistent physical location string (describes where)
*
* used by ZED vdev matching for auto-{online,expand,replace}
*/
int
zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
const char *physpath = NULL;
struct udev_list_entry *entry;
/*
* Normal disks use ID_PATH for their physical path.
*/
physpath = udev_device_get_property_value(dev, "ID_PATH");
if (physpath != NULL && strlen(physpath) > 0) {
(void) strlcpy(bufptr, physpath, buflen);
return (0);
}
/*
* Device mapper devices are virtual and don't have a physical
	 * path. For them we use ID_VDEV instead, which is set up via the
	 * /etc/vdev_id.conf file. ID_VDEV provides a persistent path
	 * to a virtual device. If you don't have vdev_id.conf set up,
	 * you cannot use multipath autoreplace with device mapper.
*/
physpath = udev_device_get_property_value(dev, "ID_VDEV");
if (physpath != NULL && strlen(physpath) > 0) {
(void) strlcpy(bufptr, physpath, buflen);
return (0);
}
/*
* For ZFS volumes use the persistent /dev/zvol/dataset identifier
*/
entry = udev_device_get_devlinks_list_entry(dev);
while (entry != NULL) {
physpath = udev_list_entry_get_name(entry);
if (strncmp(physpath, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0) {
(void) strlcpy(bufptr, physpath, buflen);
return (0);
}
entry = udev_list_entry_get_next(entry);
}
/*
* For all other devices fallback to using the by-uuid name.
*/
entry = udev_device_get_devlinks_list_entry(dev);
while (entry != NULL) {
physpath = udev_list_entry_get_name(entry);
if (strncmp(physpath, "/dev/disk/by-uuid", 17) == 0) {
(void) strlcpy(bufptr, physpath, buflen);
return (0);
}
entry = udev_list_entry_get_next(entry);
}
return (ENODATA);
}
/*
* A disk is considered a multipath whole disk when:
 * DEVNAME key value starts with "/dev/dm-"
* DM_NAME key value has "mpath" prefix
* DM_UUID key exists
* ID_PART_TABLE_TYPE key does not exist or is not gpt
*/
static boolean_t
udev_mpath_whole_disk(struct udev_device *dev)
{
const char *devname, *type, *uuid;
devname = udev_device_get_property_value(dev, "DEVNAME");
type = udev_device_get_property_value(dev, "ID_PART_TABLE_TYPE");
uuid = udev_device_get_property_value(dev, "DM_UUID");
if ((devname != NULL && strncmp(devname, "/dev/dm-", 8) == 0) &&
((type == NULL) || (strcmp(type, "gpt") != 0)) &&
(uuid != NULL)) {
return (B_TRUE);
}
return (B_FALSE);
}
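/*
 * Illustrative example (not part of this change): the udev properties a
 * multipath whole disk typically carries; the values are hypothetical:
 *
 *   DEVNAME=/dev/dm-0
 *   DM_NAME=mpatha
 *   DM_UUID=mpath-35000c5006304de3f
 *   (ID_PART_TABLE_TYPE absent, or any value other than "gpt")
 *
 * udev_mpath_whole_disk() above returns B_TRUE for such a device.
 */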
static int
udev_device_is_ready(struct udev_device *dev)
{
#ifdef HAVE_LIBUDEV_UDEV_DEVICE_GET_IS_INITIALIZED
return (udev_device_get_is_initialized(dev));
#else
/* wait for DEVLINKS property to be initialized */
return (udev_device_get_property_value(dev, "DEVLINKS") != NULL);
#endif
}
#else
int
zfs_device_get_devid(struct udev_device *dev, char *bufptr, size_t buflen)
{
(void) dev, (void) bufptr, (void) buflen;
return (ENODATA);
}
int
zfs_device_get_physical(struct udev_device *dev, char *bufptr, size_t buflen)
{
(void) dev, (void) bufptr, (void) buflen;
return (ENODATA);
}
#endif /* HAVE_LIBUDEV */
/*
* Wait up to timeout_ms for udev to set up the device node. The device is
* considered ready when libudev determines it has been initialized, all of
* the device links have been verified to exist, and it has been allowed to
* settle. At this point the device can be accessed reliably. Depending on
* the complexity of the udev rules this process could take several seconds.
*/
int
zpool_label_disk_wait(const char *path, int timeout_ms)
{
#ifdef HAVE_LIBUDEV
struct udev *udev;
struct udev_device *dev = NULL;
char nodepath[MAXPATHLEN];
char *sysname = NULL;
int ret = ENODEV;
int settle_ms = 50;
long sleep_ms = 10;
hrtime_t start, settle;
if ((udev = udev_new()) == NULL)
return (ENXIO);
start = gethrtime();
settle = 0;
do {
if (sysname == NULL) {
if (realpath(path, nodepath) != NULL) {
sysname = strrchr(nodepath, '/') + 1;
} else {
(void) usleep(sleep_ms * MILLISEC);
continue;
}
}
dev = udev_device_new_from_subsystem_sysname(udev,
"block", sysname);
if ((dev != NULL) && udev_device_is_ready(dev)) {
struct udev_list_entry *links, *link = NULL;
ret = 0;
links = udev_device_get_devlinks_list_entry(dev);
udev_list_entry_foreach(link, links) {
struct stat64 statbuf;
const char *name;
name = udev_list_entry_get_name(link);
errno = 0;
if (stat64(name, &statbuf) == 0 && errno == 0)
continue;
settle = 0;
ret = ENODEV;
break;
}
if (ret == 0) {
if (settle == 0) {
settle = gethrtime();
} else if (NSEC2MSEC(gethrtime() - settle) >=
settle_ms) {
udev_device_unref(dev);
break;
}
}
}
udev_device_unref(dev);
(void) usleep(sleep_ms * MILLISEC);
} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);
udev_unref(udev);
return (ret);
#else
int settle_ms = 50;
long sleep_ms = 10;
hrtime_t start, settle;
struct stat64 statbuf;
start = gethrtime();
settle = 0;
do {
errno = 0;
if ((stat64(path, &statbuf) == 0) && (errno == 0)) {
if (settle == 0)
settle = gethrtime();
else if (NSEC2MSEC(gethrtime() - settle) >= settle_ms)
return (0);
} else if (errno != ENOENT) {
return (errno);
}
usleep(sleep_ms * MILLISEC);
} while (NSEC2MSEC(gethrtime() - start) < timeout_ms);
return (ENODEV);
#endif /* HAVE_LIBUDEV */
}
+/*
+ * Simplified version of zpool_label_disk_wait() where we wait for a device
+ * to appear using the default timeouts.
+ */
+int
+zpool_disk_wait(const char *path)
+{
+ int timeout;
+ timeout = zpool_getenv_int("ZPOOL_IMPORT_UDEV_TIMEOUT_MS",
+ DISK_LABEL_WAIT);
+
+ return (zpool_label_disk_wait(path, timeout));
+}
+
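/*
 * Illustrative sketch (not part of this change): a hypothetical
 * equivalent of the zpool_getenv_int() helper used above, matching the
 * sscanf()-based parsing that this change removes from
 * zpool_open_func().
 */
#if 0
static int
example_getenv_int(const char *env_name, int default_val)
{
	int val;
	const char *env = getenv(env_name);

	if (env == NULL || sscanf(env, "%d", &val) != 1 || val < 0)
		val = default_val;
	return (val);
}
#endif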
/*
* Encode the persistent devices strings
* used for the vdev disk label
*/
static int
encode_device_strings(const char *path, vdev_dev_strs_t *ds,
boolean_t wholedisk)
{
#ifdef HAVE_LIBUDEV
struct udev *udev;
struct udev_device *dev = NULL;
char nodepath[MAXPATHLEN];
char *sysname;
int ret = ENODEV;
hrtime_t start;
if ((udev = udev_new()) == NULL)
return (ENXIO);
/* resolve path to a runtime device node instance */
if (realpath(path, nodepath) == NULL)
goto no_dev;
sysname = strrchr(nodepath, '/') + 1;
/*
* Wait up to 3 seconds for udev to set up the device node context
*/
start = gethrtime();
do {
dev = udev_device_new_from_subsystem_sysname(udev, "block",
sysname);
if (dev == NULL)
goto no_dev;
if (udev_device_is_ready(dev))
break; /* udev ready */
udev_device_unref(dev);
dev = NULL;
if (NSEC2MSEC(gethrtime() - start) < 10)
(void) sched_yield(); /* yield/busy wait up to 10ms */
else
(void) usleep(10 * MILLISEC);
} while (NSEC2MSEC(gethrtime() - start) < (3 * MILLISEC));
if (dev == NULL)
goto no_dev;
/*
* Only whole disks require extra device strings
*/
if (!wholedisk && !udev_mpath_whole_disk(dev))
goto no_dev;
ret = zfs_device_get_devid(dev, ds->vds_devid, sizeof (ds->vds_devid));
if (ret != 0)
goto no_dev_ref;
/* physical location string (optional) */
if (zfs_device_get_physical(dev, ds->vds_devphys,
sizeof (ds->vds_devphys)) != 0) {
ds->vds_devphys[0] = '\0'; /* empty string --> not available */
}
no_dev_ref:
udev_device_unref(dev);
no_dev:
udev_unref(udev);
return (ret);
#else
(void) path;
(void) ds;
(void) wholedisk;
return (ENOENT);
#endif
}
/*
* Rescan the enclosure sysfs path for turning on enclosure LEDs and store it
* in the nvlist * (if applicable). Like:
* vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4'
+ *
+ * If an old path was in the nvlist, and the rescan cannot find a new path,
+ * then keep the old path, since the disk may have been removed.
+ *
+ * path: The vdev path (value from ZPOOL_CONFIG_PATH)
+ * key: The nvlist_t name (like ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH)
*/
-static void
-update_vdev_config_dev_sysfs_path(nvlist_t *nv, const char *path)
+void
+update_vdev_config_dev_sysfs_path(nvlist_t *nv, const char *path,
+ const char *key)
{
char *upath, *spath;
+ const char *oldpath = NULL;
+
+ (void) nvlist_lookup_string(nv, key, &oldpath);
/* Add enclosure sysfs path (if disk is in an enclosure). */
upath = zfs_get_underlying_path(path);
spath = zfs_get_enclosure_sysfs_path(upath);
if (spath) {
- nvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, spath);
+ (void) nvlist_add_string(nv, key, spath);
} else {
- nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
+ /*
+ * We couldn't dynamically scan the disk's enclosure sysfs path.
+ * This could be because the disk went away. If there's an old
+ * enclosure sysfs path in the nvlist, then keep using it.
+ */
+ if (!oldpath) {
+ (void) nvlist_remove_all(nv, key);
+ }
}
free(upath);
free(spath);
}
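/*
 * Illustrative sketch (not part of this change): refreshing the
 * enclosure sysfs path on a single leaf vdev nvlist using the newly
 * exported signature above. The vdev path is hypothetical.
 */
#if 0
static void
example_refresh_enc_sysfs_path(nvlist_t *leaf_vdev)
{
	update_vdev_config_dev_sysfs_path(leaf_vdev, "/dev/sda1",
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
}
#endif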
/*
* This will get called for each leaf vdev.
*/
static int
sysfs_path_pool_vdev_iter_f(void *hdl_data, nvlist_t *nv, void *data)
{
(void) hdl_data, (void) data;
const char *path = NULL;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
return (1);
/* Rescan our enclosure sysfs path for this vdev */
- update_vdev_config_dev_sysfs_path(nv, path);
+ update_vdev_config_dev_sysfs_path(nv, path,
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
return (0);
}
/*
* Given an nvlist for our pool (with vdev tree), iterate over all the
* leaf vdevs and update their ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH.
*/
void
update_vdevs_config_dev_sysfs_path(nvlist_t *config)
{
nvlist_t *nvroot = NULL;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
for_each_vdev_in_nvlist(nvroot, sysfs_path_pool_vdev_iter_f, NULL);
}
/*
* Update a leaf vdev's persistent device strings
*
* - only applies for a dedicated leaf vdev (aka whole disk)
* - updated during pool create|add|attach|import
 * - used for device matching during auto-{online,expand,replace}
* - stored in a leaf disk config label (i.e. alongside 'path' NVP)
* - these strings are currently not used in kernel (i.e. for vdev_disk_open)
*
* single device node example:
* devid: 'scsi-MG03SCA300_350000494a8cb3d67-part1'
* phys_path: 'pci-0000:04:00.0-sas-0x50000394a8cb3d67-lun-0'
*
* multipath device node example:
* devid: 'dm-uuid-mpath-35000c5006304de3f'
*
* We also store the enclosure sysfs path for turning on enclosure LEDs
* (if applicable):
* vdev_enc_sysfs_path: '/sys/class/enclosure/11:0:1:0/SLOT 4'
*/
void
update_vdev_config_dev_strs(nvlist_t *nv)
{
vdev_dev_strs_t vds;
const char *env, *type, *path;
uint64_t wholedisk = 0;
/*
* For the benefit of legacy ZFS implementations, allow
* for opting out of devid strings in the vdev label.
*
* example use:
* env ZFS_VDEV_DEVID_OPT_OUT=YES zpool import dozer
*
* explanation:
* Older OpenZFS implementations had issues when attempting to
* display pool config VDEV names if a "devid" NVP value is
* present in the pool's config.
*
* For example, a pool that originated on illumos platform would
* have a devid value in the config and "zpool status" would fail
* when listing the config.
*
* A pool can be stripped of any "devid" values on import or
* prevented from adding them on zpool create|add by setting
* ZFS_VDEV_DEVID_OPT_OUT.
*/
env = getenv("ZFS_VDEV_DEVID_OPT_OUT");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2))) {
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
return;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0 ||
strcmp(type, VDEV_TYPE_DISK) != 0) {
return;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
return;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
/*
* Update device string values in the config nvlist.
*/
if (encode_device_strings(path, &vds, (boolean_t)wholedisk) == 0) {
(void) nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vds.vds_devid);
if (vds.vds_devphys[0] != '\0') {
(void) nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
vds.vds_devphys);
}
- update_vdev_config_dev_sysfs_path(nv, path);
+ update_vdev_config_dev_sysfs_path(nv, path,
+ ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
} else {
/* Clear out any stale entries. */
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_PHYS_PATH);
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
}
}
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index 19d8a4742813..eb9131190458 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -1,1965 +1,2087 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* Pool import support functions.
*
* Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
* these commands are expected to run in the global zone, we can assume
* that the devices are all readable when called.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
#ifdef HAVE_AIO_H
#include <aio.h>
#endif
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>
#include "zutil_import.h"
const char *
libpc_error_description(libpc_handle_t *hdl)
{
if (hdl->lpc_desc[0] != '\0')
return (hdl->lpc_desc);
switch (hdl->lpc_error) {
case LPC_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case LPC_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case LPC_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case LPC_EACCESS:
return (dgettext(TEXT_DOMAIN, "some devices require root "
"privileges"));
case LPC_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->lpc_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
static __attribute__((format(printf, 2, 3))) void
zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->lpc_desc, sizeof (hdl->lpc_desc), fmt, ap);
hdl->lpc_desc_active = B_TRUE;
va_end(ap);
}
static void
zutil_verror(libpc_handle_t *hdl, lpc_error_t error, const char *fmt,
va_list ap)
{
char action[1024];
(void) vsnprintf(action, sizeof (action), fmt, ap);
hdl->lpc_error = error;
if (hdl->lpc_desc_active)
hdl->lpc_desc_active = B_FALSE;
else
hdl->lpc_desc[0] = '\0';
if (hdl->lpc_printerr)
(void) fprintf(stderr, "%s: %s\n", action,
libpc_error_description(hdl));
}
static __attribute__((format(printf, 3, 4))) int
zutil_error_fmt(libpc_handle_t *hdl, lpc_error_t error,
const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zutil_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zutil_error(libpc_handle_t *hdl, lpc_error_t error, const char *msg)
{
return (zutil_error_fmt(hdl, error, "%s", msg));
}
static int
zutil_no_memory(libpc_handle_t *hdl)
{
zutil_error(hdl, LPC_NOMEM, "internal error");
exit(1);
}
void *
zutil_alloc(libpc_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) zutil_no_memory(hdl);
return (data);
}
char *
zutil_strdup(libpc_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
static char *
zutil_strndup(libpc_handle_t *hdl, const char *str, size_t n)
{
char *ret;
if ((ret = strndup(str, n)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
/*
* Intermediate structures used to gather configuration information.
*/
typedef struct config_entry {
uint64_t ce_txg;
nvlist_t *ce_config;
struct config_entry *ce_next;
} config_entry_t;
typedef struct vdev_entry {
uint64_t ve_guid;
config_entry_t *ve_configs;
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
uint64_t pe_guid;
vdev_entry_t *pe_vdevs;
struct pool_entry *pe_next;
} pool_entry_t;
typedef struct name_entry {
char *ne_name;
uint64_t ne_guid;
uint64_t ne_order;
uint64_t ne_num_labels;
struct name_entry *ne_next;
} name_entry_t;
typedef struct pool_list {
pool_entry_t *pools;
name_entry_t *names;
} pool_list_t;
/*
* Go through and fix up any path and/or devid information for the given vdev
* configuration.
*/
static int
fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
name_entry_t *ne, *best;
const char *path;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (fix_paths(hdl, child[c], names) != 0)
return (-1);
return (0);
}
/*
* This is a leaf (file or disk) vdev. In either case, go through
* the name list and see if we find a matching guid. If so, replace
* the path and see if we can calculate a new devid.
*
* There may be multiple names associated with a particular guid, in
* which case we have overlapping partitions or multiple paths to the
* same disk. In this case we prefer to use the path name which
 * matches the ZPOOL_CONFIG_PATH. If no matching entry is found, we
* use the lowest order device which corresponds to the first match
* while traversing the ZPOOL_IMPORT_PATH search path.
*/
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
path = NULL;
best = NULL;
for (ne = names; ne != NULL; ne = ne->ne_next) {
if (ne->ne_guid == guid) {
if (path == NULL) {
best = ne;
break;
}
if ((strlen(path) == strlen(ne->ne_name)) &&
strncmp(path, ne->ne_name, strlen(path)) == 0) {
best = ne;
break;
}
if (best == NULL) {
best = ne;
continue;
}
			/* Prefer paths with more vdev labels. */
if (ne->ne_num_labels > best->ne_num_labels) {
best = ne;
continue;
}
/* Prefer paths earlier in the search order. */
if (ne->ne_num_labels == best->ne_num_labels &&
ne->ne_order < best->ne_order) {
best = ne;
continue;
}
}
}
if (best == NULL)
return (0);
if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
return (-1);
update_vdev_config_dev_strs(nv);
return (0);
}
/*
* Add the given configuration to the list of known devices.
*/
static int
add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path,
int order, int num_labels, nvlist_t *config)
{
uint64_t pool_guid, vdev_guid, top_guid, txg, state;
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
name_entry_t *ne;
/*
* If this is a hot spare not currently in use or level 2 cache
* device, add it to the list of names to translate, but don't do
* anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) == 0 &&
(state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* If we have a valid config but cannot read any of these fields, then
* it means we have a half-initialized label. In vdev_label_init()
* we write a label with txg == 0 so that we can identify the device
* in case the user refers to the same disk later on. If we fail to
* create the pool, we'll be left with a label in this state
* which should not be considered part of a valid pool.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0) {
return (0);
}
/*
* First, see if we know about this pool. If not, then add it to the
* list of known pools.
*/
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
if (pe->pe_guid == pool_guid)
break;
}
if (pe == NULL) {
if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
return (-1);
}
pe->pe_guid = pool_guid;
pe->pe_next = pl->pools;
pl->pools = pe;
}
/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
* missing.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
if (ve->ve_guid == top_guid)
break;
}
if (ve == NULL) {
if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
return (-1);
}
ve->ve_guid = top_guid;
ve->ve_next = pe->pe_vdevs;
pe->pe_vdevs = ve;
}
/*
* Third, see if we have a config with a matching transaction group. If
* so, then we do nothing. Otherwise, add it to the list of known
* configs.
*/
for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
if (ce->ce_txg == txg)
break;
}
if (ce == NULL) {
if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) {
return (-1);
}
ce->ce_txg = txg;
ce->ce_config = fnvlist_dup(config);
ce->ce_next = ve->ve_configs;
ve->ve_configs = ce;
}
/*
* At this point we've successfully added our config to the list of
* known configs. The last thing to do is add the vdev guid -> path
* mappings so that we can fix up the configuration as necessary before
* doing the import.
*/
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
static int
zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
ASSERT(hdl->lpc_ops->pco_pool_active != NULL);
int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name,
guid, isactive);
return (error);
}
static nvlist_t *
zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig)
{
ASSERT(hdl->lpc_ops->pco_refresh_config != NULL);
return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle,
tryconfig));
}
/*
* Determine if the vdev id is a hole in the namespace.
*/
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
int c;
for (c = 0; c < holes; c++) {
/* Top-level is a hole */
if (hole_array[c] == id)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
* we assemble the toplevel vdevs into a full config for the pool. We make a
* pass to fix up any incorrect paths, and then add it to the main list to
* return to the user.
*/
static nvlist_t *
get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
nvlist_t *policy)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
nvlist_t **spares, **l2cache;
uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
const char *name, *hostname = NULL;
uint64_t guid;
uint_t children = 0;
nvlist_t **child = NULL;
uint64_t *hole_array, max_id;
uint_t c;
boolean_t isactive;
nvlist_t *nvl;
boolean_t valid_top_config = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
uint64_t id, max_txg = 0, hostid = 0;
uint_t holes = 0;
if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
config_seen = B_FALSE;
/*
* Iterate over all toplevel vdevs. Grab the pool configuration
* from the first one we find, and then go through the rest and
* add them as necessary to the 'vdevs' member of the config.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
/*
* Determine the best configuration for this vdev by
* selecting the config with the latest transaction
* group.
*/
best_txg = 0;
for (ce = ve->ve_configs; ce != NULL;
ce = ce->ce_next) {
if (ce->ce_txg > best_txg) {
tmp = ce->ce_config;
best_txg = ce->ce_txg;
}
}
/*
* We rely on the fact that the max txg for the
* pool will contain the most up-to-date information
* about the valid top-levels in the vdev namespace.
*/
if (best_txg > max_txg) {
(void) nvlist_remove(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
DATA_TYPE_UINT64);
(void) nvlist_remove(config,
ZPOOL_CONFIG_HOLE_ARRAY,
DATA_TYPE_UINT64_ARRAY);
max_txg = best_txg;
hole_array = NULL;
holes = 0;
max_id = 0;
valid_top_config = B_FALSE;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
verify(nvlist_add_uint64(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
max_id) == 0);
valid_top_config = B_TRUE;
}
if (nvlist_lookup_uint64_array(tmp,
ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
&holes) == 0) {
verify(nvlist_add_uint64_array(config,
ZPOOL_CONFIG_HOLE_ARRAY,
hole_array, holes) == 0);
}
}
if (!config_seen) {
/*
* Copy the relevant pieces of data to the pool
* configuration:
*
* version
* pool guid
* name
* comment (if available)
* compatibility features (if available)
* pool state
* hostid (if available)
* hostname (if available)
*/
uint64_t state, version;
const char *comment = NULL;
const char *compatibility = NULL;
version = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VERSION);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_VERSION, version);
guid = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_GUID);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_GUID, guid);
name = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_POOL_NAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_POOL_NAME, name);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMMENT, &comment) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMMENT, comment);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMPATIBILITY,
compatibility);
state = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_STATE);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_STATE, state);
hostid = 0;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
fnvlist_add_uint64(config,
ZPOOL_CONFIG_HOSTID, hostid);
hostname = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_HOSTNAME, hostname);
}
config_seen = B_TRUE;
}
/*
* Add this top-level vdev to the child array.
*/
verify(nvlist_lookup_nvlist(tmp,
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
&id) == 0);
if (id >= children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (id + 1) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = id + 1;
}
if (nvlist_dup(nvtop, &child[id], 0) != 0)
goto nomem;
}
/*
* If we have information about all the top-levels then
* clean up the nvlist which we've constructed. This
* means removing any extraneous devices that are
* beyond the valid range or adding devices to the end
* of our array which appear to be missing.
*/
if (valid_top_config) {
if (max_id < children) {
for (c = max_id; c < children; c++)
nvlist_free(child[c]);
children = max_id;
} else if (max_id > children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (max_id) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = max_id;
}
}
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
/*
* The vdev namespace may contain holes as a result of
* device removal. We must add them back into the vdev
* tree before we process any missing devices.
*/
if (holes > 0) {
ASSERT(valid_top_config);
for (c = 0; c < children; c++) {
nvlist_t *holey;
if (child[c] != NULL ||
!vdev_is_hole(hole_array, holes, c))
continue;
if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
/*
* Holes in the namespace are treated as
* "hole" top-level vdevs and have a
* special flag set on them.
*/
if (nvlist_add_string(holey,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(holey);
goto nomem;
}
child[c] = holey;
}
}
/*
* Look for any missing top-level vdevs. If this is the case,
* create a faked up 'missing' vdev as a placeholder. We cannot
* simply compress the child array, because the kernel performs
* certain checks to make sure the vdev IDs match their location
* in the configuration.
*/
for (c = 0; c < children; c++) {
if (child[c] == NULL) {
nvlist_t *missing;
if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
if (nvlist_add_string(missing,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(missing);
goto nomem;
}
child[c] = missing;
}
}
/*
* Put all of this pool's top-level vdevs into a root vdev.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)child, children) != 0) {
nvlist_free(nvroot);
goto nomem;
}
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
children = 0;
child = NULL;
/*
* Go through and fix up any paths and/or devids based on our
* known list of vdev GUID -> path mappings.
*/
if (fix_paths(hdl, nvroot, pl->names) != 0) {
nvlist_free(nvroot);
goto nomem;
}
/*
* Add the root vdev to this pool's configuration.
*/
if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
nvroot) != 0) {
nvlist_free(nvroot);
goto nomem;
}
nvlist_free(nvroot);
/*
* zdb uses this path to report on active pools that were
* imported or created using -R.
*/
if (active_ok)
goto add_pool;
/*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
if (zutil_pool_active(hdl, name, guid, &isactive) != 0)
goto error;
if (isactive) {
nvlist_free(config);
config = NULL;
continue;
}
if (policy != NULL) {
if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) != 0)
goto nomem;
}
if ((nvl = zutil_refresh_config(hdl, config)) == NULL) {
nvlist_free(config);
config = NULL;
continue;
}
nvlist_free(config);
config = nvl;
/*
* Go through and update the paths for spares, now that we have
* them.
*/
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
if (fix_paths(hdl, spares[i], pl->names) != 0)
goto nomem;
}
}
/*
* Update the paths for l2cache devices.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
for (i = 0; i < nl2cache; i++) {
if (fix_paths(hdl, l2cache[i], pl->names) != 0)
goto nomem;
}
}
/*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
DATA_TYPE_UINT64);
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
DATA_TYPE_STRING);
if (hostid != 0) {
verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
hostid) == 0);
verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
hostname) == 0);
}
add_pool:
/*
* Add this pool to the list of configs.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
nvlist_free(config);
config = NULL;
}
return (ret);
nomem:
(void) zutil_no_memory(hdl);
error:
nvlist_free(config);
nvlist_free(ret);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
return (NULL);
}
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
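/*
 * Worked example (illustrative, not part of this change): with
 * VDEV_LABELS == 4, labels 0 and 1 sit at the front of the device at
 * offsets 0 and sizeof (vdev_label_t), while labels 2 and 3 sit at the
 * back at size - 2 * sizeof (vdev_label_t) and
 * size - sizeof (vdev_label_t), where size is the device size rounded
 * down to a multiple of the label size.
 */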
/*
 * The same description as zpool_read_label() below applies here, except
 * that we read the labels without aio, presumably because an aio call
 * errored out in a way that retrying without aio may circumvent.
*/
static int
zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
int l, count = 0;
vdev_phys_t *label;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
label = (vdev_phys_t *)umem_alloc_aligned(sizeof (*label), PAGESIZE,
UMEM_DEFAULT);
if (label == NULL)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
if (pread64(fd, label, sizeof (vdev_phys_t),
offset) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(label->vp_nvlist,
sizeof (label->vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
umem_free_aligned(label, sizeof (*label));
*config = expected_config;
return (0);
}
/*
* Given a file descriptor, read the label information and return an nvlist
* describing the configuration, if there is one. The number of valid
* labels found will be returned in num_labels when non-NULL.
*/
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
#ifndef HAVE_AIO_H
return (zpool_read_label_slow(fd, config, num_labels));
#else
struct stat64 statbuf;
struct aiocb aiocbs[VDEV_LABELS];
struct aiocb *aiocbps[VDEV_LABELS];
vdev_phys_t *labels;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error, l, count = 0;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
labels = (vdev_phys_t *)umem_alloc_aligned(
VDEV_LABELS * sizeof (*labels), PAGESIZE, UMEM_DEFAULT);
if (labels == NULL)
return (-1);
memset(aiocbs, 0, sizeof (aiocbs));
for (l = 0; l < VDEV_LABELS; l++) {
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
aiocbs[l].aio_fildes = fd;
aiocbs[l].aio_offset = offset;
aiocbs[l].aio_buf = &labels[l];
aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
aiocbs[l].aio_lio_opcode = LIO_READ;
aiocbps[l] = &aiocbs[l];
}
if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
int saved_errno = errno;
boolean_t do_slow = B_FALSE;
error = -1;
if (errno == EAGAIN || errno == EINTR || errno == EIO) {
/*
* A portion of the requests may have been submitted.
* Clean them up.
*/
for (l = 0; l < VDEV_LABELS; l++) {
errno = 0;
switch (aio_error(&aiocbs[l])) {
case EINVAL:
break;
case EINPROGRESS:
- // This shouldn't be possible to
- // encounter, die if we do.
+ /*
+ * This shouldn't be possible to
+ * encounter, die if we do.
+ */
ASSERT(B_FALSE);
zfs_fallthrough;
+ case EREMOTEIO:
+ /*
+ * May be returned by an NVMe device
+ * which is visible in /dev/ but due
+ * to a low-level format change, or
+ * other error, needs to be rescanned.
+ * Try the slow method.
+ */
+ zfs_fallthrough;
case EOPNOTSUPP:
case ENOSYS:
do_slow = B_TRUE;
zfs_fallthrough;
case 0:
default:
(void) aio_return(&aiocbs[l]);
}
}
}
if (do_slow) {
/*
			 * At least some of the I/O accessed files that are
			 * unsafe for AIO. Let's try again, without AIO.
*/
error = zpool_read_label_slow(fd, config, num_labels);
saved_errno = errno;
}
umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
errno = saved_errno;
return (error);
}
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(labels[l].vp_nvlist,
sizeof (labels[l].vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
*config = expected_config;
return (0);
#endif
}
/*
* Sorted by full path and then vdev guid to allow for multiple entries with
* the same full path name. This is required because it's possible to
* have multiple block devices with labels that refer to the same
* ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both
* entries need to be added to the cache. Scenarios where this can occur
* include overwritten pool labels, devices which are visible from multiple
* hosts and multipath devices.
*/
int
slice_cache_compare(const void *arg1, const void *arg2)
{
const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid;
uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid;
int rv;
rv = TREE_ISIGN(strcmp(nm1, nm2));
if (rv)
return (rv);
return (TREE_CMP(guid1, guid2));
}
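/*
 * Illustrative sketch (not part of this change): the comparator above
 * orders entries first by name and then by vdev guid, so two nodes
 * that share a path but carry different guids stay distinct in the
 * cache. The names and guids below are hypothetical.
 */
#if 0
static void
example_compare(void)
{
	rdsk_node_t a = { .rn_name = (char *)"/dev/sda", .rn_vdev_guid = 1 };
	rdsk_node_t b = { .rn_name = (char *)"/dev/sda", .rn_vdev_guid = 2 };
	int cmp = slice_cache_compare(&a, &b);

	/* Same name, lower guid sorts first: cmp is negative (-1) */
	(void) cmp;
}
#endif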
static int
label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid,
uint64_t vdev_guid, const char **path, const char **devid)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
const char *val;
int error;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
error = label_paths_impl(hdl, child[c],
pool_guid, vdev_guid, path, devid);
if (error)
return (error);
}
return (0);
}
if (nvroot == NULL)
return (0);
error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
if ((error != 0) || (guid != vdev_guid))
return (0);
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val);
if (error == 0)
*path = val;
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val);
if (error == 0)
*devid = val;
return (0);
}
/*
 * Given a disk label, fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID
* and store these strings as config_path and devid_path respectively.
* The returned pointers are only valid as long as label remains valid.
*/
int
label_paths(libpc_handle_t *hdl, nvlist_t *label, const char **path,
const char **devid)
{
nvlist_t *nvroot;
uint64_t pool_guid;
uint64_t vdev_guid;
+ uint64_t state;
*path = NULL;
*devid = NULL;
+ if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid) != 0)
+ return (ENOENT);
+
+ /*
+ * In case of spare or l2cache, we directly return path/devid from the
+ * label.
+ */
+ if (!(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state)) &&
+ (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE)) {
+ (void) nvlist_lookup_string(label, ZPOOL_CONFIG_PATH, path);
+ (void) nvlist_lookup_string(label, ZPOOL_CONFIG_DEVID, devid);
+ return (0);
+ }
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
- nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
- nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
+ nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
return (ENOENT);
return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path,
devid));
}
static void
zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *path, const char *name, int order)
{
avl_index_t where;
rdsk_node_t *slice;
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
if (asprintf(&slice->rn_name, "%s/%s", path, name) == -1) {
free(slice);
return;
}
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = cache;
slice->rn_hdl = hdl;
slice->rn_order = order + IMPORT_ORDER_SCAN_OFFSET;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(lock);
if (avl_find(cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(cache, slice, where);
}
pthread_mutex_unlock(lock);
}
static int
zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error;
char path[MAXPATHLEN];
struct dirent64 *dp;
DIR *dirp;
if (realpath(dir, path) == NULL) {
error = errno;
if (error == ENOENT)
return (0);
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, LPC_BADPATH, dgettext(TEXT_DOMAIN,
"cannot resolve path '%s'"), dir);
return (error);
}
dirp = opendir(path);
if (dirp == NULL) {
error = errno;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, LPC_BADPATH, dgettext(TEXT_DOMAIN,
"cannot open '%s'"), path);
return (error);
}
while ((dp = readdir64(dirp)) != NULL) {
const char *name = dp->d_name;
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
continue;
switch (dp->d_type) {
case DT_UNKNOWN:
case DT_BLK:
case DT_LNK:
#ifdef __FreeBSD__
case DT_CHR:
#endif
case DT_REG:
break;
default:
continue;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name,
order);
}
(void) closedir(dirp);
return (0);
}
static int
zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error = 0;
char path[MAXPATHLEN];
char *d = NULL;
ssize_t dl;
const char *dpath, *name;
/*
* Separate the directory and the basename.
* We do this so that we can get the realpath of
* the directory. We don't get the realpath on the
* whole path because if it's a symlink, we want the
	 * path of the symlink, not where it points to.
*/
name = zfs_basename(dir);
if ((dl = zfs_dirnamelen(dir)) == -1)
dpath = ".";
else
dpath = d = zutil_strndup(hdl, dir, dl);
if (realpath(dpath, path) == NULL) {
error = errno;
if (error == ENOENT) {
error = 0;
goto out;
}
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, LPC_BADPATH, dgettext(TEXT_DOMAIN,
"cannot resolve path '%s'"), dir);
goto out;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name, order);
out:
free(d);
return (error);
}
/*
* Scan a list of directories for zfs devices.
*/
static int
zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache, const char * const *dir, size_t dirs)
{
avl_tree_t *cache;
rdsk_node_t *slice;
void *cookie;
int i, error;
*slice_cache = NULL;
cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
for (i = 0; i < dirs; i++) {
struct stat sbuf;
if (stat(dir[i], &sbuf) != 0) {
error = errno;
if (error == ENOENT)
continue;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, LPC_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir[i]);
goto error;
}
/*
* If dir[i] is a directory, we walk through it and add all
* the entries to the cache. If it's not a directory, we just
* add it to the cache.
*/
if (S_ISDIR(sbuf.st_mode)) {
if ((error = zpool_find_import_scan_dir(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
} else {
if ((error = zpool_find_import_scan_path(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
}
}
*slice_cache = cache;
return (0);
error:
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
free(slice->rn_name);
free(slice);
}
free(cache);
return (error);
}
/*
* Given a list of directories to search, find all pools stored on disk. This
* includes partial pools which are not available to import. If no args are
 * given, then the default search paths are used.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
pthread_mutex_t *lock, avl_tree_t *cache)
{
(void) lock;
nvlist_t *ret = NULL;
pool_list_t pools = { 0 };
pool_entry_t *pe, *penext;
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
rdsk_node_t *slice;
void *cookie;
tpool_t *t;
verify(iarg->poolname == NULL || iarg->guid == 0);
/*
* Create a thread pool to parallelize the process of reading and
	 * validating labels; a large number of threads can be used due to
* minimal contention.
*/
t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
for (slice = avl_first(cache); slice;
(slice = avl_walk(cache, slice, AVL_AFTER)))
(void) tpool_dispatch(t, zpool_open_func, slice);
tpool_wait(t);
tpool_destroy(t);
/*
* Process the cache, filtering out any entries which are not
* for the specified pool then adding matching label configs.
*/
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
if (slice->rn_config != NULL) {
nvlist_t *config = slice->rn_config;
boolean_t matched = B_TRUE;
boolean_t aux = B_FALSE;
int fd;
/*
* Check if it's a spare or l2cache device. If it is,
* we need to skip the name and guid check since they
* don't exist on aux device label.
*/
if (iarg->poolname != NULL || iarg->guid != 0) {
uint64_t state;
aux = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &state) == 0 &&
(state == POOL_STATE_SPARE ||
state == POOL_STATE_L2CACHE);
}
if (iarg->poolname != NULL && !aux) {
const char *pname;
matched = nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &pname) == 0 &&
strcmp(iarg->poolname, pname) == 0;
} else if (iarg->guid != 0 && !aux) {
uint64_t this_guid;
matched = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 &&
iarg->guid == this_guid;
}
if (matched) {
/*
* Verify all remaining entries can be opened
* exclusively. This will prune all underlying
* multipath devices which otherwise could
* result in the vdev appearing as UNAVAIL.
*
* Under zdb, this step isn't required and
* would prevent a zdb -e of active pools with
* no cachefile.
*/
fd = open(slice->rn_name,
O_RDONLY | O_EXCL | O_CLOEXEC);
if (fd >= 0 || iarg->can_be_active) {
if (fd >= 0)
close(fd);
add_config(hdl, &pools,
slice->rn_name, slice->rn_order,
slice->rn_num_labels, config);
}
}
nvlist_free(config);
}
free(slice->rn_name);
free(slice);
}
avl_destroy(cache);
free(cache);
ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
for (pe = pools.pools; pe != NULL; pe = penext) {
penext = pe->pe_next;
for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
venext = ve->ve_next;
for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
cenext = ce->ce_next;
nvlist_free(ce->ce_config);
free(ce);
}
free(ve);
}
free(pe);
}
for (ne = pools.names; ne != NULL; ne = nenext) {
nenext = ne->ne_next;
free(ne->ne_name);
free(ne);
}
return (ret);
}
/*
* Given a config, discover the paths for the devices which
* exist in the config.
*/
static int
discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
avl_tree_t *cache, pthread_mutex_t *lock)
{
const char *path = NULL;
ssize_t dl;
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (int c = 0; c < children; c++) {
discover_cached_paths(hdl, child[c], cache, lock);
}
}
/*
* Once we have the path, we need to add the directory to
* our directory cache.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
int ret;
char c = '\0';
if ((dl = zfs_dirnamelen(path)) == -1) {
path = ".";
} else {
c = path[dl];
((char *)path)[dl] = '\0';
}
ret = zpool_find_import_scan_dir(hdl, lock, cache,
path, 0);
if (c != '\0')
((char *)path)[dl] = c;
return (ret);
}
return (0);
}
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
{
char *buf;
int fd;
struct stat64 statbuf;
nvlist_t *raw, *src, *dst;
nvlist_t *pools;
nvpair_t *elem;
const char *name;
uint64_t this_guid;
boolean_t active;
verify(iarg->poolname == NULL || iarg->guid == 0);
if ((fd = open(iarg->cachefile, O_RDONLY | O_CLOEXEC)) < 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) zutil_error(hdl, LPC_BADCACHE, dgettext(TEXT_DOMAIN,
"failed to open cache file"));
return (NULL);
}
if (fstat64(fd, &statbuf) != 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) close(fd);
(void) zutil_error(hdl, LPC_BADCACHE, dgettext(TEXT_DOMAIN,
"failed to get size of cache file"));
return (NULL);
}
if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) {
(void) close(fd);
return (NULL);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) close(fd);
free(buf);
(void) zutil_error(hdl, LPC_BADCACHE, dgettext(TEXT_DOMAIN,
"failed to read cache file contents"));
return (NULL);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
free(buf);
(void) zutil_error(hdl, LPC_BADCACHE, dgettext(TEXT_DOMAIN,
"invalid or corrupt cache file contents"));
return (NULL);
}
free(buf);
/*
* Go through and get the current state of the pools and refresh their
* state.
*/
if (nvlist_alloc(&pools, 0, 0) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
src = fnvpair_value_nvlist(elem);
name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
if (iarg->poolname != NULL && strcmp(iarg->poolname, name) != 0)
continue;
this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
if (iarg->guid != 0 && iarg->guid != this_guid)
continue;
if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (active)
continue;
if (iarg->scan) {
uint64_t saved_guid = iarg->guid;
const char *saved_poolname = iarg->poolname;
pthread_mutex_t lock;
/*
* Create the device cache that will hold the
* devices we will scan based on the cachefile.
* This will get destroyed and freed by
* zpool_find_import_impl.
*/
avl_tree_t *cache = zutil_alloc(hdl,
sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare,
sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
nvlist_t *nvroot = fnvlist_lookup_nvlist(src,
ZPOOL_CONFIG_VDEV_TREE);
/*
* We only want to find the pool with this_guid.
* We will reset these values back later.
*/
iarg->guid = this_guid;
iarg->poolname = NULL;
/*
* We need to build up a cache of devices that exist
* in the paths pointed to by the cachefile. This allows
* us to preserve the device namespace that was
* originally specified by the user but also lets us
* scan devices in those directories in case they had
* been renamed.
*/
pthread_mutex_init(&lock, NULL);
discover_cached_paths(hdl, nvroot, cache, &lock);
nvlist_t *nv = zpool_find_import_impl(hdl, iarg,
&lock, cache);
pthread_mutex_destroy(&lock);
/*
* zpool_find_import_impl will return
* a list of pools that it found based on the
* device cache. There should only be one pool
* since we're looking for a specific guid.
* We will use that pool to build up the final
* pool nvlist which is returned back to the
* caller.
*/
nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
if (pair == NULL)
continue;
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
continue;
}
if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
iarg->cachefile) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
update_vdevs_config_dev_sysfs_path(src);
if ((dst = zutil_refresh_config(hdl, src)) == NULL) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(dst);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
static nvlist_t *
zpool_find_import(libpc_handle_t *hdl, importargs_t *iarg)
{
pthread_mutex_t lock;
avl_tree_t *cache;
nvlist_t *pools = NULL;
verify(iarg->poolname == NULL || iarg->guid == 0);
pthread_mutex_init(&lock, NULL);
/*
* Locate pool member vdevs by blkid or by directory scanning.
* On success a newly allocated AVL tree which is populated with an
* entry for each discovered vdev will be returned in the cache.
* It's the caller's responsibility to consume and destroy this tree.
*/
if (iarg->scan || iarg->paths != 0) {
size_t dirs = iarg->paths;
const char * const *dir = (const char * const *)iarg->path;
if (dirs == 0)
dir = zpool_default_search_paths(&dirs);
if (zpool_find_import_scan(hdl, &lock, &cache,
dir, dirs) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
} else {
if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
}
pools = zpool_find_import_impl(hdl, iarg, &lock, cache);
pthread_mutex_destroy(&lock);
return (pools);
}
nvlist_t *
zpool_search_import(libpc_handle_t *hdl, importargs_t *import)
{
nvlist_t *pools = NULL;
verify(import->poolname == NULL || import->guid == 0);
if (import->cachefile != NULL)
pools = zpool_find_import_cached(hdl, import);
else
pools = zpool_find_import(hdl, import);
if ((pools == NULL || nvlist_empty(pools)) &&
hdl->lpc_open_access_error && geteuid() != 0) {
(void) zutil_error(hdl, LPC_EACCESS, dgettext(TEXT_DOMAIN,
"no pools found"));
}
return (pools);
}
static boolean_t
pool_match(nvlist_t *cfg, const char *tgt)
{
uint64_t v, guid = strtoull(tgt, NULL, 0);
const char *s;
if (guid != 0) {
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
return (v == guid);
} else {
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
return (strcmp(s, tgt) == 0);
}
return (B_FALSE);
}
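/*
 * Usage note (illustrative targets, not part of this change):
 * pool_match(cfg, "tank") compares by pool name, while a numeric target
 * such as "123456789" or "0x1a2b3c4d" compares by GUID, since
 * strtoull(tgt, NULL, 0) accepts decimal, octal, and hex strings and
 * yields a nonzero GUID for them.
 */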
int
zpool_find_config(libpc_handle_t *hdl, const char *target, nvlist_t **configp,
importargs_t *args)
{
nvlist_t *pools;
nvlist_t *match = NULL;
nvlist_t *config = NULL;
char *sepp = NULL;
int count = 0;
char *targetdup = strdup(target);
if (targetdup == NULL)
return (ENOMEM);
*configp = NULL;
if ((sepp = strpbrk(targetdup, "/@")) != NULL)
*sepp = '\0';
pools = zpool_search_import(hdl, args);
if (pools != NULL) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
VERIFY0(nvpair_value_nvlist(elem, &config));
if (pool_match(config, targetdup)) {
count++;
if (match != NULL) {
/* multiple matches found */
continue;
} else {
match = fnvlist_dup(config);
}
}
}
fnvlist_free(pools);
}
if (count == 0) {
free(targetdup);
return (ENOENT);
}
if (count > 1) {
free(targetdup);
fnvlist_free(match);
return (EINVAL);
}
*configp = match;
free(targetdup);
return (0);
}
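+/*
+ * Caller sketch for zpool_find_config() (hypothetical handle and target,
+ * not part of this change).  It resolves a pool name or GUID string to a
+ * single config, returning ENOENT when nothing matches and EINVAL when
+ * several pools do; the returned nvlist is a duplicate the caller frees.
+ *
+ *	nvlist_t *cfg = NULL;
+ *	importargs_t args = { 0 };
+ *	int err = zpool_find_config(hdl, "tank", &cfg, &args);
+ *	if (err == 0) {
+ *		// ... use cfg ...
+ *		fnvlist_free(cfg);
+ *	}
+ */
+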
+/* Return whether a vdev is a leaf vdev. Note: draid spares are leaf vdevs. */
+static boolean_t
+vdev_is_leaf(nvlist_t *nv)
+{
+ uint_t children = 0;
+ nvlist_t **child;
+
+ (void) nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+ &child, &children);
+
+ return (children == 0);
+}
+
+/* Return whether a vdev is a leaf vdev and a real device (disk or file). */
+static boolean_t
+vdev_is_real_leaf(nvlist_t *nv)
+{
+ const char *type = NULL;
+ if (!vdev_is_leaf(nv))
+ return (B_FALSE);
+
+	/* A missing type cannot be a real leaf; also avoids a NULL strcmp() */
+	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
+		return (B_FALSE);
+	if ((strcmp(type, VDEV_TYPE_DISK) == 0) ||
+	    (strcmp(type, VDEV_TYPE_FILE) == 0)) {
+		return (B_TRUE);
+	}
+
+ return (B_FALSE);
+}
+
+/*
+ * This function is called by our FOR_EACH_VDEV() macros.
+ *
+ * state: State machine status (stored inside of a (nvlist_t *))
+ * nv: The current vdev nvlist_t we are iterating over.
+ * last_nv: The previous vdev nvlist_t we returned to the user in
+ * the last iteration of FOR_EACH_VDEV(). We use it
+ * to find the next vdev nvlist_t we should return.
+ * real_leaves_only: Only return real leaf vdevs (disk or file).
+ *
+ * Returns 1 if we found the next vdev nvlist_t for this iteration. 0 if
+ * we're still searching for it.
+ */
+static int
+__for_each_vdev_macro_helper_func(void *state, nvlist_t *nv, void *last_nv,
+ boolean_t real_leaves_only)
+{
+ enum {FIRST_NV = 0, NEXT_IS_MATCH = 1, STOP_LOOKING = 2};
+
+ /* The very first entry in the NV list is a special case */
+ if (*((nvlist_t **)state) == (nvlist_t *)FIRST_NV) {
+ if (real_leaves_only && !vdev_is_real_leaf(nv))
+ return (0);
+
+ *((nvlist_t **)last_nv) = nv;
+ *((nvlist_t **)state) = (nvlist_t *)STOP_LOOKING;
+ return (1);
+ }
+
+ /*
+ * We came across our last_nv, meaning the next one is the one we
+ * want
+ */
+ if (nv == *((nvlist_t **)last_nv)) {
+ /* Next iteration of this function will return the nvlist_t */
+ *((nvlist_t **)state) = (nvlist_t *)NEXT_IS_MATCH;
+ return (0);
+ }
+
+ /*
+ * We marked NEXT_IS_MATCH on the previous iteration, so this is the one
+ * we want.
+ */
+ if (*(nvlist_t **)state == (nvlist_t *)NEXT_IS_MATCH) {
+ if (real_leaves_only && !vdev_is_real_leaf(nv))
+ return (0);
+
+ *((nvlist_t **)last_nv) = nv;
+ *((nvlist_t **)state) = (nvlist_t *)STOP_LOOKING;
+ return (1);
+ }
+
+ return (0);
+}
+
+int
+for_each_vdev_macro_helper_func(void *state, nvlist_t *nv, void *last_nv)
+{
+ return (__for_each_vdev_macro_helper_func(state, nv, last_nv, B_FALSE));
+}
+
+int
+for_each_real_leaf_vdev_macro_helper_func(void *state, nvlist_t *nv,
+ void *last_nv)
+{
+ return (__for_each_vdev_macro_helper_func(state, nv, last_nv, B_TRUE));
+}
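+
+/*
+ * Iteration sketch: the helpers above are intended to be driven repeatedly
+ * by an iteration macro.  A minimal hand-rolled equivalent (not the macro
+ * itself; "nvroot" is assumed to be a vdev tree nvlist obtained elsewhere):
+ *
+ *	nvlist_t *state = NULL;		// FIRST_NV
+ *	nvlist_t *nv = NULL;
+ *	while (for_each_vdev_cb(&state, nvroot,
+ *	    for_each_vdev_macro_helper_func, &nv) == 1) {
+ *		// nv is the next non-indirect vdev in the tree
+ *	}
+ *
+ * Each pass restarts the traversal from the root and uses the saved state
+ * and last_nv to return the vdev that follows the previously returned one.
+ */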
+
/*
* Internal function for iterating over the vdevs.
*
* For each vdev, func() will be called and will be passed 'zhp' (which is
* typically the zpool_handle_t cast as a void pointer), the vdev's nvlist, and
* a user-defined data pointer).
*
* The return values from all the func() calls will be OR'd together and
* returned.
*/
int
for_each_vdev_cb(void *zhp, nvlist_t *nv, pool_vdev_iter_f func,
void *data)
{
nvlist_t **child;
uint_t c, children;
int ret = 0;
int i;
const char *type;
const char *list[] = {
ZPOOL_CONFIG_SPARES,
ZPOOL_CONFIG_L2CACHE,
ZPOOL_CONFIG_CHILDREN
};
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (ret);
/* Don't run our function on indirect vdevs */
if (strcmp(type, VDEV_TYPE_INDIRECT) != 0) {
ret |= func(zhp, nv, data);
}
for (i = 0; i < ARRAY_SIZE(list); i++) {
if (nvlist_lookup_nvlist_array(nv, list[i], &child,
&children) == 0) {
for (c = 0; c < children; c++) {
uint64_t ishole = 0;
(void) nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole);
if (ishole)
continue;
ret |= for_each_vdev_cb(zhp, child[c],
func, data);
}
}
}
return (ret);
}
/*
* Given an ZPOOL_CONFIG_VDEV_TREE nvpair, iterate over all the vdevs, calling
* func() for each one. func() is passed the vdev's nvlist and an optional
* user-defined 'data' pointer.
*/
int
for_each_vdev_in_nvlist(nvlist_t *nvroot, pool_vdev_iter_f func, void *data)
{
return (for_each_vdev_cb(NULL, nvroot, func, data));
}
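/*
 * Callback sketch for for_each_vdev_in_nvlist() (hypothetical helper, not
 * part of this change): count the leaf vdevs under a ZPOOL_CONFIG_VDEV_TREE
 * nvlist "nvroot" obtained elsewhere.  zhp is NULL here because
 * for_each_vdev_in_nvlist() passes no handle through.
 *
 *	static int
 *	count_leaves_cb(void *zhp, nvlist_t *nv, void *data)
 *	{
 *		nvlist_t **child;
 *		uint_t children = 0;
 *
 *		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
 *		    &child, &children) != 0 || children == 0)
 *			(*(uint_t *)data)++;
 *		return (0);
 *	}
 *
 *	uint_t nleaves = 0;
 *	(void) for_each_vdev_in_nvlist(nvroot, count_leaves_cb, &nleaves);
 */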
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_pool.c b/sys/contrib/openzfs/lib/libzutil/zutil_pool.c
index 288a0033cd13..86460de3fc61 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_pool.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_pool.c
@@ -1,146 +1,177 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/nvpair.h>
#include <sys/fs/zfs.h>
+#include <math.h>
#include <libzutil.h>
static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
char refcnt[6];
char blocks[6], lsize[6], psize[6], dsize[6];
char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
if (dds == NULL || dds->dds_blocks == 0)
return;
if (h == -1)
(void) strcpy(refcnt, "Total");
else
zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
zfs_nicebytes(dds->dds_lsize, lsize, sizeof (lsize));
zfs_nicebytes(dds->dds_psize, psize, sizeof (psize));
zfs_nicebytes(dds->dds_dsize, dsize, sizeof (dsize));
zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
zfs_nicebytes(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
zfs_nicebytes(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
zfs_nicebytes(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
refcnt,
blocks, lsize, psize, dsize,
ref_blocks, ref_lsize, ref_psize, ref_dsize);
}
/*
* Print the DDT histogram and the column totals.
*/
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
int h;
(void) printf("\n");
(void) printf("bucket "
" allocated "
" referenced \n");
(void) printf("______ "
"______________________________ "
"______________________________\n");
(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
"refcnt",
"blocks", "LSIZE", "PSIZE", "DSIZE",
"blocks", "LSIZE", "PSIZE", "DSIZE");
(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
"------",
"------", "-----", "-----", "-----",
"------", "-----", "-----", "-----");
for (h = 0; h < 64; h++)
dump_ddt_stat(&ddh->ddh_stat[h], h);
dump_ddt_stat(dds_total, -1);
(void) printf("\n");
}
/*
* Process the buffer of nvlists, unpacking and storing each nvlist record
* into 'records'. 'leftover' is set to the number of bytes that weren't
* processed as there wasn't a complete record.
*/
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
nvlist_t ***records, uint_t *numrecords)
{
uint64_t reclen;
nvlist_t *nv;
int i;
void *tmp;
while (bytes_read > sizeof (reclen)) {
/* get length of packed record (stored as little endian) */
for (i = 0, reclen = 0; i < sizeof (reclen); i++)
reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
if (bytes_read < sizeof (reclen) + reclen)
break;
/* unpack record */
int err = nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0);
if (err != 0)
return (err);
bytes_read -= sizeof (reclen) + reclen;
buf += sizeof (reclen) + reclen;
/* add record to nvlist array */
(*numrecords)++;
if (ISP2(*numrecords + 1)) {
tmp = realloc(*records,
*numrecords * 2 * sizeof (nvlist_t *));
if (tmp == NULL) {
nvlist_free(nv);
(*numrecords)--;
return (ENOMEM);
}
*records = tmp;
}
(*records)[*numrecords - 1] = nv;
}
*leftover = bytes_read;
return (0);
}
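/*
 * Record framing consumed above (an illustrative sketch, assuming only what
 * the loop itself decodes): each record is an 8-byte little-endian length
 * followed by a packed nvlist of exactly that many bytes, and the records
 * array is grown whenever *numrecords + 1 reaches a power of two.
 *
 *	uint64_t reclen = ...;			// size of the packed nvlist
 *	uchar_t hdr[sizeof (reclen)];
 *	for (int i = 0; i < sizeof (reclen); i++)
 *		hdr[i] = (reclen >> (8 * i)) & 0xff;
 *	// emit hdr, then the packed nvlist bytes
 */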
+
+/*
+ * Floating point sleep(). Allows you to pass in a floating point value for
+ * seconds.
+ */
+void
+fsleep(float sec)
+{
+ struct timespec req;
+ req.tv_sec = floor(sec);
+ req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
+ nanosleep(&req, NULL);
+}
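+
+/*
+ * Example: fsleep(0.5) sleeps for roughly half a second.  Note that
+ * nanosleep(2) may return early when interrupted by a signal; this
+ * helper makes no attempt to retry in that case.
+ */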
+
+/*
+ * Get environment variable 'env' and return it as an integer.
+ * If 'env' is not set, then return 'default_val' instead.
+ */
+int
+zpool_getenv_int(const char *env, int default_val)
+{
+ char *str;
+ int val;
+ str = getenv(env);
+ if ((str == NULL) || sscanf(str, "%d", &val) != 1 ||
+ val < 0) {
+ val = default_val;
+ }
+ return (val);
+}
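+
+/*
+ * Example (hypothetical environment variable, for illustration only):
+ *
+ *	int timeout_ms = zpool_getenv_int("ZPOOL_IMPORT_TIMEOUT_MS", 1000);
+ *
+ * yields 1000 when the variable is unset, non-numeric, or negative.
+ */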
diff --git a/sys/contrib/openzfs/man/man4/spl.4 b/sys/contrib/openzfs/man/man4/spl.4
index 82455fb53254..414a92394858 100644
--- a/sys/contrib/openzfs/man/man4/spl.4
+++ b/sys/contrib/openzfs/man/man4/spl.4
@@ -1,211 +1,203 @@
.\"
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" Copyright 2013 Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\"
.Dd August 24, 2020
.Dt SPL 4
.Os
.
.Sh NAME
.Nm spl
.Nd parameters of the SPL kernel module
.
.Sh DESCRIPTION
.Bl -tag -width Ds
.It Sy spl_kmem_cache_kmem_threads Ns = Ns Sy 4 Pq uint
The number of threads created for the spl_kmem_cache task queue.
This task queue is responsible for allocating new slabs
for use by the kmem caches.
For the majority of systems and workloads only a small number of threads are
required.
.
-.It Sy spl_kmem_cache_reclaim Ns = Ns Sy 0 Pq uint
-When this is set it prevents Linux from being able to rapidly reclaim all the
-memory held by the kmem caches.
-This may be useful in circumstances where it's preferable that Linux
-reclaim memory from some other subsystem first.
-Setting this will increase the likelihood out of memory events on a memory
-constrained system.
-.
.It Sy spl_kmem_cache_obj_per_slab Ns = Ns Sy 8 Pq uint
The preferred number of objects per slab in the cache.
In general, a larger value will increase the cache's memory footprint
while decreasing the time required to perform an allocation.
Conversely, a smaller value will minimize the footprint
and improve cache reclaim time but individual allocations may take longer.
.
.It Sy spl_kmem_cache_max_size Ns = Ns Sy 32 Po 64-bit Pc or Sy 4 Po 32-bit Pc Pq uint
The maximum size of a kmem cache slab in MiB.
This effectively limits the maximum cache object size to
.Sy spl_kmem_cache_max_size Ns / Ns Sy spl_kmem_cache_obj_per_slab .
.Pp
Caches may not be created with
objects sized larger than this limit.
.
.It Sy spl_kmem_cache_slab_limit Ns = Ns Sy 16384 Pq uint
For small objects the Linux slab allocator should be used to make the most
efficient use of the memory.
However, large objects are not supported by
the Linux slab and therefore the SPL implementation is preferred.
This value is used to determine the cutoff between a small and large object.
.Pp
Objects of size
.Sy spl_kmem_cache_slab_limit
or smaller will be allocated using the Linux slab allocator;
larger objects use the SPL allocator.
A cutoff of 16K was determined to be optimal for architectures using 4K pages.
.
.It Sy spl_kmem_alloc_warn Ns = Ns Sy 32768 Pq uint
As a general rule
.Fn kmem_alloc
allocations should be small,
preferably just a few pages, since they must be physically contiguous.
Therefore, a rate limited warning will be printed to the console for any
.Fn kmem_alloc
which exceeds a reasonable threshold.
.Pp
The default warning threshold is set to eight pages but capped at 32K to
accommodate systems using large pages.
This value was selected to be small enough to ensure
the largest allocations are quickly noticed and fixed,
but large enough to avoid logging warnings when an allocation is
larger than optimal without being a serious concern.
Since this value is tunable, developers are encouraged to set it lower
when testing so any new largish allocations are quickly caught.
These warnings may be disabled by setting the threshold to zero.
.
.It Sy spl_kmem_alloc_max Ns = Ns Sy KMALLOC_MAX_SIZE Ns / Ns Sy 4 Pq uint
Large
.Fn kmem_alloc
allocations will fail if they exceed
.Sy KMALLOC_MAX_SIZE .
Allocations which are marginally smaller than this limit may succeed but
should still be avoided due to the expense of locating a contiguous range
of free pages.
Therefore, a maximum kmem size with a reasonable safety margin of 4x is set.
.Fn kmem_alloc
allocations larger than this maximum will quickly fail.
.Fn vmem_alloc
allocations less than or equal to this value will use
.Fn kmalloc ,
but shift to
.Fn vmalloc
when exceeding this value.
.
.It Sy spl_kmem_cache_magazine_size Ns = Ns Sy 0 Pq uint
Cache magazines are an optimization designed to minimize the cost of
allocating memory.
They do this by keeping a per-cpu cache of recently
freed objects, which can then be reallocated without taking a lock.
This can improve performance on highly contended caches.
However, because objects in magazines will prevent otherwise empty slabs
from being immediately released this may not be ideal for low memory machines.
.Pp
For this reason,
.Sy spl_kmem_cache_magazine_size
can be used to set a maximum magazine size.
When this value is set to 0 the magazine size will
be automatically determined based on the object size.
Otherwise magazines will be limited to 2-256 objects per magazine (i.e. per CPU).
Magazines may never be entirely disabled in this implementation.
.
.It Sy spl_hostid Ns = Ns Sy 0 Pq ulong
The system hostid; when set, this can be used to uniquely identify a system.
By default this value is set to zero which indicates the hostid is disabled.
It can be explicitly enabled by placing a unique non-zero value in
.Pa /etc/hostid .
.
.It Sy spl_hostid_path Ns = Ns Pa /etc/hostid Pq charp
The expected path to locate the system hostid when specified.
This value may be overridden for non-standard configurations.
.
.It Sy spl_panic_halt Ns = Ns Sy 0 Pq uint
Cause a kernel panic on assertion failures.
When not enabled, the thread is halted to facilitate further debugging.
.Pp
Set to a non-zero value to enable.
.
.It Sy spl_taskq_kick Ns = Ns Sy 0 Pq uint
Kick stuck taskq to spawn threads.
When a non-zero value is written to it, all taskqs are scanned.
If any of them have a pending task more than 5 seconds old,
they are kicked to spawn more threads.
This can be used if you find that a rare deadlock occurs
because one or more taskqs didn't spawn a thread when they should have.
.
.It Sy spl_taskq_thread_bind Ns = Ns Sy 0 Pq int
Bind taskq threads to specific CPUs.
When enabled all taskq threads will be distributed evenly
across the available CPUs.
By default, this behavior is disabled to allow the Linux scheduler
the maximum flexibility to determine where a thread should run.
.
.It Sy spl_taskq_thread_dynamic Ns = Ns Sy 1 Pq int
Allow dynamic taskqs.
When enabled taskqs which set the
.Sy TASKQ_DYNAMIC
flag will by default create only a single thread.
New threads will be created on demand up to a maximum allowed number
to facilitate the completion of outstanding tasks.
Threads which are no longer needed will be promptly destroyed.
By default this behavior is enabled but it can be disabled to
aid performance analysis or troubleshooting.
.
.It Sy spl_taskq_thread_priority Ns = Ns Sy 1 Pq int
Allow newly created taskq threads to set a non-default scheduler priority.
When enabled, the priority specified when a taskq is created will be applied
to all threads created by that taskq.
When disabled all threads will use the default Linux kernel thread priority.
By default, this behavior is enabled.
.
.It Sy spl_taskq_thread_sequential Ns = Ns Sy 4 Pq int
The number of items a taskq worker thread must handle without interruption
before requesting a new worker thread be spawned.
This is used to control
how quickly taskqs ramp up the number of threads processing the queue.
Because Linux thread creation and destruction are relatively inexpensive, a
small default value has been selected.
This means that normally threads will be created aggressively which is
desirable.
Increasing this value will
result in a slower thread creation rate which may be preferable for some
configurations.
.
.It Sy spl_max_show_tasks Ns = Ns Sy 512 Pq uint
The maximum number of tasks per pending list in each taskq shown in
.Pa /proc/spl/taskq{,-all} .
Write
.Sy 0
to turn off the limit.
The proc file walks the lists with a lock held,
so reading it could cause a lock-up if a list grows too large
without this limit on the output.
"(truncated)" will be shown if the list is larger than the limit.
.
.It Sy spl_taskq_thread_timeout_ms Ns = Ns Sy 10000 Pq uint
(Linux-only)
How long a taskq has to have had no work before we tear it down.
Previously, we would tear down a dynamic taskq worker as soon
as we noticed it had no work, but it was observed that this led
to a lot of churn in tearing down things we then immediately
spawned anew.
In practice, it seems any nonzero value will remove the vast
majority of this churn, while the nontrivially larger value
was chosen to help filter out the little remaining churn on
a mostly idle system.
Setting this value to
.Sy 0
will revert to the previous behavior.
.El
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 4ec52a2fb653..352990e02daf 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2603 +1,2622 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd July 21, 2023
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the dbuf cache.
The target size is the lesser of this value and
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the metadata dbuf cache.
The target size is the lesser of this value and
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq uint
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq uint
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_mutex_cache_shift Ns = Ns Sy 0 Pq uint
Set the size of the mutex array for the dbuf cache.
When set to
.Sy 0
the array is dynamically sized based on total system memory.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq uint
Number of dnode slots allocated in a single operation, as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Limit the amount of data that can be prefetched with one call, in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq u64
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq u64
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 2 Pq u64
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq u64
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_exclude_special Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether buffers present on special vdevs are eligible for caching
into L2ARC.
If set to 1, exclude dbufs on special vdevs from being cached to L2ARC.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is off,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature, some MRU buffers will still be present
in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take into account if this option is enabled as the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide if toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq uint
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq u64
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64 MiB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1 GiB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
Make some blocks above this size be gang blocks.
This option is used by the test suite to facilitate testing.
.
.It Sy metaslab_force_ganging_pct Ns = Ns Sy 3 Ns % Pq uint
For blocks that could be forced to be a gang block (due to
.Sy metaslab_force_ganging ) ,
force this percentage of them to be gang blocks.
.
.It Sy zfs_ddt_zap_default_bs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP data block size as a power of 2.
Note that changing this after creating a DDT on the pool will not affect
existing DDTs, only newly created ones.
.
.It Sy zfs_ddt_zap_default_ibs Ns = Ns Sy 15 Po 32 KiB Pc Pq int
Default DDT ZAP indirect block size as a power of 2.
Note that changing this after creating a DDT on the pool will not affect
existing DDTs, only newly created ones.
.
.It Sy zfs_default_bs Ns = Ns Sy 9 Po 512 B Pc Pq int
Default dnode block size as a power of 2.
.
.It Sy zfs_default_ibs Ns = Ns Sy 17 Po 128 KiB Pc Pq int
Default dnode indirect block size as a power of 2.
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64 MiB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16 MiB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16 MiB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1 hour Pc Pq u64
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq uint
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq uint
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq uint
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
Default lower limit for metaslab size.
.
.It Sy zfs_vdev_max_ms_shift Ns = Ns Sy 34 Po 16 GiB Pc Pq uint
Default upper limit for metaslab size.
.
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint
Maximum ashift used when optimizing for logical \[->] physical sector size on
new
top-level vdevs.
May be increased up to
.Sy ASHIFT_MAX Po 16 Pc ,
but this may negatively impact pool space efficiency.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq uint
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq uint
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq uint
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_preload_limit Ns = Ns Sy 10 Pq uint
Maximum number of metaslabs per group to preload.
.
.It Sy metaslab_preload_pct Ns = Ns Sy 50 Pq uint
Percentage of CPUs to run a metaslab preload taskq on.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq uint
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq uint
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy reference_history Ns = Ns Sy 3 Pq uint
Maximum number of reference holders being tracked when
.Sy reference_tracking_enable
is active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq uint
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool
import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled after the import has started
to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled after the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq uint
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
.
.It Sy spa_upgrade_errlog_limit Ns = Ns Sy 0 Pq uint
Limits the number of on-disk error log entries that will be converted to the
new format when enabling the
.Sy head_errlog
feature.
The default is to convert all log entries.
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zap_micro_max_size Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
Maximum micro ZAP size.
A micro ZAP is upgraded to a fat ZAP, once it grows beyond the specified size.
.
.It Sy zfetch_min_distance Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Min bytes to prefetch per stream.
Prefetch distance starts from the demand access size and quickly grows to
this value, doubling on each hit.
After that it may grow further by 1/8 per hit, but only if some prefetches
since last time haven't completed in time to satisfy the demand request,
i.e. the prefetch depth didn't cover the read latency or the pool got saturated.
.
.It Sy zfetch_max_distance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 1 Pq uint
Minimum time before an inactive prefetch stream can be reclaimed.
.
.It Sy zfetch_max_sec_reap Ns = Ns Sy 2 Pq uint
Maximum time before an inactive prefetch stream can be deleted.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Controls whether the ARC may use scatter/gather lists;
unsetting this forces all allocations to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER\-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5 KiB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq u64
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that the limit is instead computed as
.Sy zfs_arc_dnode_limit_percent
of the ARC meta buffers.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC meta buffers that can be consumed by dnodes.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq uint
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq uint
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2 KiB
that's evicted,
.Em 1 KiB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq uint
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq uint
If set to a nonzero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq u64
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
Under Linux, half of system memory will be used as the limit.
Under
.Fx ,
the larger of
.Sy all_system_memory No \- Sy 1 GiB
and
.Sy 5/8 No \(mu Sy all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64 MiB .
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.
.It Sy zfs_arc_meta_balance Ns = Ns Sy 500 Pq uint
Balance between metadata and data on ghost hits.
Values above 100 increase metadata caching by proportionally reducing the
effect of ghost data hits on the target data/metadata rate.
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq u64
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32 MiB
and
.Sy all_system_memory No / Sy 32 .
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq uint
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq uint
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
Number of arc_prune threads.
.Fx
does not need more than one.
Linux may theoretically use one per mount point up to the number of CPUs,
but that was not proven to be useful.
.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Ns Sy 0 Pq u64
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128 MiB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq uint
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameter controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No / Sy 2
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No \(mu Sy 1.5
blocks new buffer allocation until the reclaim thread catches up.
Once started, the reclamation process continues until the ARC size
returns below the target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq uint
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
.Pp
The default limit of
.Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100 ms per allocation attempt,
even with a small average compressed block size of ~8 KiB.
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq u64
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the larger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq uint
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq uint
Minimum percent of obsolete bytes in vdev mapping required to attempt to
condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete
mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq u64
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/O operations only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1 min Pc Pq u64
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle between ten times and a tenth of this number.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale No \(mu Sy zfs_dirty_data_max Em must No be smaller than Sy 2^64 .
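.Pp
As a worked example, assuming a pool capable of roughly 20,000 operations per
second, a smooth setting and the overflow constraint above could be expressed
as:
.Bd -literal -compact
/* Hypothetical sizing for a ~20000 op/s pool. */
uint64_t zfs_delay_scale = 1000000000ULL / 20000;   /* = 50000 */
/* zfs_delay_scale * zfs_dirty_data_max must stay below 2^64. */
assert(zfs_dirty_data_max < UINT64_MAX / zfs_delay_scale);
.Ed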
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay and deadman zevents (which report slow I/O operations) to this
many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq u64
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 131072 Po 128k Pc Pq u64
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 131072
blocks (which means
.Em 16 GiB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128 KiB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/O operations for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more frequent the flushing, destroying log
blocks more quickly as they become obsolete and leaving fewer blocks
to be read during import time after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
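.Pp
The 16 GiB figure above follows directly from the defaults:
.Bd -literal -compact
/* 131072 log blocks x 131072 B (128 KiB) each. */
uint64_t logical = 131072ULL * 131072ULL;   /* 17179869184 B = 16 GiB */
.Ed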
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq u64
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq u64
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
unflushed metaslabs in the pool.
.
.It Sy zfs_unflushed_log_txg_max Ns = Ns Sy 1000 Pq u64
Tunable limiting the maximum time in TXGs any metaslab may remain unflushed.
It effectively limits the maximum number of unflushed per-TXG spacemap logs
that need to be read after an unclean pool export.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously, while smaller files are deleted
synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is
available.
This only applies on Linux.
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy min(physical_ram/4, 4GiB) ,
or
.Sy min(physical_ram/4, 1GiB)
for 32-bit systems.
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq uint
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq uint
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq uint
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_wrlog_data_max Ns = Pq int
The upper limit of write-transaction zil log data size in bytes.
Write operations are throttled when approaching the limit until log data is
cleared out after transaction group sync.
Because of some overhead, it should be set at least 2 times the size of
.Sy zfs_dirty_data_max
.No to prevent harming normal write throughput .
It also should be smaller than the size of the slog device if slog is present.
.Pp
Defaults to
.Sy zfs_dirty_data_max*2
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
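.Pp
As a worked example under the default of 110% (a sketch of the described
check, not the exact kernel code):
.Bd -literal -compact
uint64_t requested = 1ULL << 30;              /* fallocate of 1 GiB */
uint64_t reserved = requested * 110 / 100;    /* ~1.1 GiB must be free */
.Ed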
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
.It Sy zfs_bclone_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable the experimental block cloning feature.
If this setting is 0, then even if feature@block_cloning is enabled,
attempts to clone blocks will act as though the feature is disabled.
.
+.It Sy zfs_bclone_wait_dirty Ns = Ns Sy 0 Ns | Ns 1 Pq int
+When set to 1 the FICLONE and FICLONERANGE ioctls wait for dirty data to be
+written to disk.
+This allows the clone operation to reliably succeed when a file is
+modified and then immediately cloned.
+For small files this may be slower than making a copy of the file.
+Therefore, this setting defaults to 0 which causes a clone operation to
+immediately fail when encountering a dirty block.
+.
.It Sy zfs_blake3_impl Ns = Ns Sy fastest Pq string
Select a BLAKE3 implementation.
.Pp
Supported selectors are:
.Sy cycle , fastest , generic , sse2 , sse41 , avx2 , avx512 .
All except
.Sy cycle , fastest No and Sy generic
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of BLAKE3 are available, the
.Sy fastest
will be chosen using a micro benchmark.
You can see the benchmark results by reading this kstat file:
.Pa /proc/spl/kstat/zfs/chksum_bench .
.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy UINT64_MAX Po unlimited Pc Pq u64
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq u64
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq uint
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq uint
Minimum asynchronous read I/O operation active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq uint
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
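.Pp
A minimal sketch of the interpolation described above, using hypothetical
variable names (the in-kernel implementation differs):
.Bd -literal -compact
uint32_t lo = zfs_vdev_async_write_min_active;   /* default 2 */
uint32_t hi = zfs_vdev_async_write_max_active;   /* default 10 */
if (dirty_pct <= 30)
        writes = lo;
else if (dirty_pct >= 60)
        writes = hi;
else    /* linear interpolation between the two limits */
        writes = lo + (dirty_pct - 30) * (hi - lo) / (60 - 30);
.Ed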
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 10 Pq uint
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq uint
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq uint
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq uint
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq uint
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_open_timeout_ms Ns = Ns Sy 1000 Pq uint
Timeout value to wait before determining a device is missing
during import.
This is helpful for transient missing paths due
to links being briefly removed and recreated in response to
udev events.
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq uint
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq uint
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq uint
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq uint
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq uint
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq uint
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq uint
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq uint
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq uint
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq uint
Some HDDs tend to prioritize sequential I/O so strongly that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit
operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
.Pp
Also see
.Sy zio_dva_throttle_enabled .
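.Pp
With the defaults, the per-vdev allocation queue limit works out to:
.Bd -literal -compact
/* 1000% of zfs_vdev_async_write_max_active (10). */
uint32_t queue_depth = 10 * 1000 / 100;   /* = 100 per top-level vdev */
.Ed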
.
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev IO allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines if the driver should retire on a given error type.
The following options may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 Device No driver retries on device errors
2 Transport No driver retries on transport errors.
4 Driver No driver retries on driver errors.
.TE
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
* 16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_btree_verify_intensity Ns = Ns Sy 0 Pq uint
Enables btree verification.
The following settings are cumulative:
.TS
box;
lbz r l l .
Value Description
1 Verify height.
2 Verify pointers from children to parent.
3 Verify element counts.
4 Verify element order. (expensive)
* 5 Verify unused memory is poisoned. (expensive)
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq uint
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq uint
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq s64
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq u64
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq u64
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask \(em
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq u64
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100 MiB Pc Pq u64
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq u64
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq u64
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq uint
We currently support block sizes from
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we formerly forbade creating blocks larger than 1M.
Larger blocks could be created by changing this tunable,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq u64
Minimum number of metaslabs to flush per dirty TXG.
.
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq uint
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq uint
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq uint
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for
allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest multihost updates will be available
in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq u64
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval No / Sy leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
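.Pp
For example, a pool with 8 leaf vdevs at the default interval issues one
multihost write pool-wide roughly every 125 ms:
.Bd -literal -compact
/* write period = zfs_multihost_interval / leaf vdevs */
uint64_t period_ms = 1000 / 8;   /* = 125 ms */
.Ed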
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval No \(mu Sy zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100 ms
is enforced.
.Pp
.Sy 0 No is equivalent to Sy 1 .
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED which, depending on
its configuration, may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals No \(mu Sy zfs_multihost_interval
milliseconds pass without a successful MMP write.
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
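.Pp
Under the defaults this yields a 10-second window:
.Bd -literal -compact
/* 10 intervals x 1000 ms each without a successful MMP write. */
uint64_t suspend_after_ms = 10 * 1000;   /* = 10 s */
.Ed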
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable forcing TXG sync to find holes.
When enabled, forces ZFS to sync data when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used, allowing holes in a file to be accurately reported.
When disabled, holes will not be reported in recently dirtied files.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50 MiB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq uint
The number of blocks pointed to by an indirect (non-L0) block that should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 30 Ns % Pq u64
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle .
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch
.Pq for, e.g., Nm zfs Cm send
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard I/O errors during device removal.
When set, if a device encounters a hard I/O error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq uint
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scrub_error_blocks_per_txg Ns = Ns Sy 4096 Pq uint
Error blocks to be scrubbed in one TXG.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq uint
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq uint
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how full a segment is,
while a lower number indicates we care more about the size of the extent
without considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver
performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq uint
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint .
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification
I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
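.Pp
With both factors at their default of 20, a system with 32 GiB of RAM works
out as follows (a sketch of the arithmetic, not the exact kernel code):
.Bd -literal -compact
uint64_t ram  = 32ULL << 30;
uint64_t hard = ram / 20;    /* zfs_scan_mem_lim_fact: ~1.6 GiB */
uint64_t soft = hard / 20;   /* zfs_scan_mem_lim_soft_fact: ~82 MiB */
.Ed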
.
.It Sy zfs_scan_report_txgs Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When reporting resilver throughput and estimated completion time use the
performance observed over roughly the last
.Sy zfs_scan_report_txgs
TXGs.
When set to zero performance is calculated over the time between checkpoints.
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32 MiB .
.
.It Sy zfs_recv_best_effort_corrective Ns = Ns Sy 0 Pq int
When this variable is set to non-zero, a corrective receive:
.Bl -enum -compact -offset 4n -width "1."
.It
Does not enforce the restriction of source & destination snapshot GUIDs
matching.
.It
If there is an error during healing, the healing receive is not
terminated; instead, it moves on to the next record.
.El
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq uint
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq uint
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this
value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq uint
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes; because when we turn compression off,
many blocks' size will change, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128 KiB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128 KiB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this
size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq uint
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
This controls the number of threads used by
.Sy dp_sync_taskq .
The default value of
.Sy 75%
will create a maximum of one thread per CPU.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before
issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned
devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory
usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq uint
Flush dirty data to disk at least once every this many seconds (maximum TXG
duration).
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
Max vdev I/O aggregation size for non-rotating media.
.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum distance for the last queued I/O operation in which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq uint
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints warning to kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq uint
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15 min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per cpu.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36 KiB
can improve performance.
.
.It Sy zil_maxcopied Ns = Ns Sy 7680 Ns B Po 7.5 KiB Pc Pq uint
This sets the maximum number of write bytes logged via WR_COPIED.
It tunes a tradeoff between additional memory copy and possibly worse log
space efficiency vs additional range lock/unlock.
.
.It Sy zil_min_commit_timeout Ns = Ns Sy 5000 Pq u64
This sets the minimum delay in nanoseconds that the ZIL will wait for more
records before committing a block.
If ZIL writes are too fast, the kernel may not be able to sleep for so short
an interval, increasing log latency above that allowed by
.Sy zfs_commit_timeout_pct .
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by single active ZIL writer.
.
.It Sy zfs_zil_saxattr Ns = Ns Sy 1 Ns | Ns 0 Pq int
Setting this tunable to zero disables ZIL logging of new
.Sy xattr Ns = Ns Sy sa
records if the
.Sy org.openzfs:zilsaxattr
feature is enabled on the pool.
This would only be necessary to work around bugs in the ZIL logging or replay
code for this record type.
The tunable has no effect if the feature is disabled.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq uint
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the
ZIL.
.
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq uint
Whether the heuristic for detection of incompressible data with zstd levels
>= 3, using LZ4 and zstd-1 passes, is enabled.
.
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq uint
Minimum uncompressed size (inclusive) of a record before the early abort
heuristic will be attempted.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30 s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zfs_xattr_compat Ns = Ns Sy 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
If
.Sy 0
.Pq the default on Linux ,
user namespace xattr names are prefixed with the namespace, to be backwards
compatible with previous versions of ZFS on Linux.
If
.Sy 1
.Pq the default on Fx ,
user namespace xattr names are not prefixed, to be backwards compatible with
previous versions of ZFS on illumos and
.Fx .
.Pp
Either naming scheme can be read on this and future versions of ZFS, regardless
of this tunable, but legacy ZFS on illumos or
.Fx
are unable to read user namespace xattrs written in the Linux format, and
legacy versions of ZFS on Linux are unable to read user namespace xattrs written
in the legacy ZFS format.
.Pp
An existing xattr with the alternate naming scheme is removed when overwriting
the xattr so as to not accumulate duplicates.
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression and
checksum calculations.
A fractional number of CPUs is rounded down.
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
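.Pp
For example, on a system with 12 online CPUs, the default of 80%
results in 9 worker threads (12 \(mu 0.8 = 9.6, rounded down).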
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Lower values improve I/O ordering and CPU utilization,
while higher values reduce lock contention.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
.
+.It Sy zio_taskq_read Ns = Ns Sy fixed,1,8 null scale null Pq charp
+Set the queue and thread configuration for the I/O read queues.
+This is an advanced debugging parameter.
+Don't change this unless you understand what it does.
+.
+.It Sy zio_taskq_write Ns = Ns Sy batch fixed,1,5 scale fixed,1,5 Pq charp
+Set the queue and thread configuration for the I/O write queues.
+This is an advanced debugging parameter.
+Don't change this unless you understand what it does.
+.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq long
Discard (TRIM) operations done on zvols will be done in batches of this
many blocks, where block size is determined by the
.Sy volblocksize
property of a zvol.
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.Sy zvol_request_sync
is ignored when running on a kernel that supports block multiqueue
.Pq Li blk-mq .
.
.It Sy zvol_threads Ns = Ns Sy 0 Pq uint
The number of system-wide threads to use for processing zvol block I/Os.
If
.Sy 0
(the default) then internally set
.Sy zvol_threads
to the number of CPUs present or 32 (whichever is greater).
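.Pp
For example, this resolves to 32 threads on an 8-CPU system,
and to 64 threads on a 64-CPU system.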
.
.It Sy zvol_blk_mq_threads Ns = Ns Sy 0 Pq uint
The number of threads per zvol to use for queuing I/O requests.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
If
.Sy 0
(the default) then internally set
.Sy zvol_blk_mq_threads
to the number of CPUs present.
.
.It Sy zvol_use_blk_mq Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Set to
.Sy 1
to use the
.Li blk-mq
API for zvols.
Set to
.Sy 0
(the default) to use the legacy zvol APIs.
This setting can give better or worse zvol performance depending on
the workload.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
.
.It Sy zvol_blk_mq_blocks_per_thread Ns = Ns Sy 8 Pq uint
If
.Sy zvol_use_blk_mq
is enabled, then process this number of
.Sy volblocksize Ns -sized blocks per zvol thread.
This tunable can be used to favor better performance for zvol reads (lower
values) or writes (higher values).
If set to
.Sy 0 ,
then the zvol layer will process the maximum number of blocks
per thread that it can.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
.
.It Sy zvol_blk_mq_queue_depth Ns = Ns Sy 0 Pq uint
The queue_depth value for the zvol
.Li blk-mq
interface.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
If
.Sy 0
(the default) then use the kernel's default queue depth.
Values are clamped to the kernel's
.Dv BLKDEV_MIN_RQ
and
.Dv BLKDEV_MAX_RQ Ns / Ns Dv BLKDEV_DEFAULT_RQ
limits.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines the behavior of zvol block devices when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.
.It Sy zvol_enforce_quotas Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Enable strict ZVOL quota enforcement.
Strict quota enforcement may have a performance impact.
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/O requests.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
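.Pp
For example, with illustrative per-queue maxima of 300 for each of the five
classes, the maxima would sum to 1500; with an aggregate maximum of 1000,
no more than 1000 operations could ever be active at once.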
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its
maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of simultaneous operations also stabilizes the
response time of operations from other queues, in particular synchronous ones.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there is more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
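.Pp
For example, with illustrative values of 1 and 10 for the minimum and maximum
active counts and 30% and 60% for the two dirty-percent thresholds,
dirty data at 45% of
.Sy zfs_dirty_data_max
sits halfway up the slope and allows
1 + (45 \- 30)/(60 \- 30) \(mu (10 \- 1) = 5.5,
i.e. about five concurrent async write operations.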
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.D1 min_time = min( Ns Sy zfs_delay_scale No \(mu Po Sy dirty No \- Sy min Pc / Po Sy max No \- Sy dirty Pc , 100ms)
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint
of the curve.
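.Pp
For example, assuming the default
.Sy zfs_delay_min_dirty_percent
of 60%, dirty data at 80% of the allowed maximum gives
(80 \- 60)/(100 \- 80) = 1,
so the delay equals
.Sy zfs_delay_scale
exactly; this is the midpoint marked in the graphs below.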
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it's effectively the inverse of IOPS.
Here, the midpoint of
.Em 500 us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man7/zpool-features.7 b/sys/contrib/openzfs/man/man7/zpool-features.7
index 8ca4bd927b24..8456a9aa7648 100644
--- a/sys/contrib/openzfs/man/man7/zpool-features.7
+++ b/sys/contrib/openzfs/man/man7/zpool-features.7
@@ -1,963 +1,966 @@
.\"
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\" Copyright (c) 2019, Klara Inc.
.\" Copyright (c) 2019, Allan Jude
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd June 23, 2022
.Dt ZPOOL-FEATURES 7
.Os
.
.Sh NAME
.Nm zpool-features
.Nd description of ZFS pool features
.
.Sh DESCRIPTION
ZFS pool on-disk format versions are specified via
.Dq features
which replace the old on-disk format numbers
.Pq the last supported on-disk format number is 28 .
To enable a feature on a pool, use the
.Nm zpool Cm upgrade
command, or set the
.Sy feature Ns @ Ns Ar feature-name
property to
.Sy enabled .
Please also see the
.Sx Compatibility feature sets
section for information on how sets of features may be enabled together.
.Pp
The pool format does not affect file system version compatibility or the ability
to send file systems between pools.
.Pp
Since most features can be enabled independently of each other, the on-disk
format of the pool is specified by the set of all features marked as
.Sy active
on the pool.
If the pool was created by another software version
this set may include unsupported features.
.
.Ss Identifying features
Every feature has a GUID of the form
.Ar com.example : Ns Ar feature-name .
The reversed DNS name ensures that the feature's GUID is unique across all ZFS
implementations.
When unsupported features are encountered on a pool they will
be identified by their GUIDs.
Refer to the documentation for the ZFS
implementation that created the pool for information about those features.
.Pp
Each supported feature also has a short name.
By convention a feature's short name is the portion of its GUID which follows
the
.Sq \&:
.Po
i.e.
.Ar com.example : Ns Ar feature-name
would have the short name
.Ar feature-name
.Pc ,
however a feature's short name may differ across ZFS implementations if
following the convention would result in name conflicts.
.
.Ss Feature states
Features can be in one of three states:
.Bl -tag -width "disabled"
.It Sy active
This feature's on-disk format changes are in effect on the pool.
Support for this feature is required to import the pool in read-write mode.
If this feature is not read-only compatible,
support is also required to import the pool in read-only mode
.Pq see Sx Read-only compatibility .
.It Sy enabled
An administrator has marked this feature as enabled on the pool, but the
feature's on-disk format changes have not been made yet.
The pool can still be imported by software that does not support this feature,
but changes may be made to the on-disk format at any time
which will move the feature to the
.Sy active
state.
Some features may support returning to the
.Sy enabled
state after becoming
.Sy active .
See feature-specific documentation for details.
.It Sy disabled
This feature's on-disk format changes have not been made and will not be made
unless an administrator moves the feature to the
.Sy enabled
state.
Features cannot be disabled once they have been enabled.
.El
.Pp
The state of supported features is exposed through pool properties of the form
.Sy feature Ns @ Ns Ar short-name .
.
.Ss Read-only compatibility
Some features may make on-disk format changes that do not interfere with other
software's ability to read from the pool.
These features are referred to as
.Dq read-only compatible .
If all unsupported features on a pool are read-only compatible,
the pool can be imported in read-only mode by setting the
.Sy readonly
property during import
.Po see
.Xr zpool-import 8
for details on importing pools
.Pc .
.
.Ss Unsupported features
For each unsupported feature enabled on an imported pool, a pool property
named
.Sy unsupported Ns @ Ns Ar feature-name
will indicate why the import was allowed despite the unsupported feature.
Possible values for this property are:
.Bl -tag -width "readonly"
.It Sy inactive
The feature is in the
.Sy enabled
state and therefore the pool's on-disk
format is still compatible with software that does not support this feature.
.It Sy readonly
The feature is read-only compatible and the pool has been imported in
read-only mode.
.El
.
.Ss Feature dependencies
Some features depend on other features being enabled in order to function.
Enabling a feature will automatically enable any features it depends on.
.
.Ss Compatibility feature sets
It is sometimes necessary for a pool to maintain compatibility with a
specific on-disk format, by enabling and disabling particular features.
The
.Sy compatibility
property facilitates this by allowing feature sets to be read from text files.
When set to
.Sy off
.Pq the default ,
compatibility feature sets are disabled
.Pq i.e. all features are enabled ;
when set to
.Sy legacy ,
no features are enabled.
When set to a comma-separated list of filenames
.Po
each filename may either be an absolute path, or relative to
.Pa /etc/zfs/compatibility.d
or
.Pa /usr/share/zfs/compatibility.d
.Pc ,
the lists of requested features are read from those files,
separated by whitespace and/or commas.
Only features present in all files are enabled.
.Pp
Simple sanity checks are applied to the files:
they must be between 1 B and 16 KiB in size, and must end with a newline
character.
.Pp
The requested features are applied when a pool is created using
.Nm zpool Cm create Fl o Sy compatibility Ns = Ns Ar …
and control which features are enabled when using
.Nm zpool Cm upgrade .
.Nm zpool Cm status
will not show a warning about disabled features which are not part
of the requested feature set.
.Pp
The special value
.Sy legacy
prevents any features from being enabled, either via
.Nm zpool Cm upgrade
or
.Nm zpool Cm set Sy feature Ns @ Ns Ar feature-name Ns = Ns Sy enabled .
This setting also prevents pools from being upgraded to newer on-disk versions.
This is a safety measure to prevent new features from being
accidentally enabled, breaking compatibility.
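.Pp
For example, to pin an existing pool
.Pq here the hypothetical pool Ar tank
to its current on-disk format:
.Bd -literal -compact -offset Ds
# zpool set compatibility=legacy tank
.Ed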
.Pp
By convention, compatibility files in
.Pa /usr/share/zfs/compatibility.d
are provided by the distribution, and include feature sets
supported by important versions of popular distributions, and feature
sets commonly supported at the start of each year.
Compatibility files in
.Pa /etc/zfs/compatibility.d ,
if present, will take precedence over files with the same name in
.Pa /usr/share/zfs/compatibility.d .
.Pp
If an unrecognized feature is found in these files, an error message will
be shown.
If the unrecognized feature is in a file in
.Pa /etc/zfs/compatibility.d ,
this is treated as an error and processing will stop.
If the unrecognized feature is under
.Pa /usr/share/zfs/compatibility.d ,
this is treated as a warning and processing will continue.
This difference is to allow distributions to include features
which might not be recognized by the currently-installed binaries.
.Pp
Compatibility files may include comments:
any text from
.Sq #
to the end of the line is ignored.
.Pp
.Sy Example :
.Bd -literal -compact -offset 4n
.No example# Nm cat Pa /usr/share/zfs/compatibility.d/grub2
# Features which are supported by GRUB2
allocation_classes
async_destroy
block_cloning
bookmarks
device_rebuild
embedded_data
empty_bpobj
enabled_txg
extensible_dataset
filesystem_limits
hole_birth
large_blocks
livelist
log_spacemap
lz4_compress
project_quota
resilver_defer
spacemap_histogram
spacemap_v2
userobj_accounting
zilsaxattr
zpool_checkpoint
.No example# Nm zpool Cm create Fl o Sy compatibility Ns = Ns Ar grub2 Ar bootpool Ar vdev
.Ed
.Pp
See
.Xr zpool-create 8
and
.Xr zpool-upgrade 8
for more information on how these commands are affected by feature sets.
.
.de feature
.It Sy \\$2
.Bl -tag -compact -width "READ-ONLY COMPATIBLE"
.It GUID
.Sy \\$1:\\$2
.if !"\\$4"" \{\
.It DEPENDENCIES
\fB\\$4\fP\c
.if !"\\$5"" , \fB\\$5\fP\c
.if !"\\$6"" , \fB\\$6\fP\c
.if !"\\$7"" , \fB\\$7\fP\c
.if !"\\$8"" , \fB\\$8\fP\c
.if !"\\$9"" , \fB\\$9\fP\c
.\}
.It READ-ONLY COMPATIBLE
\\$3
.El
.Pp
..
.
.ds instant-never \
.No This feature becomes Sy active No as soon as it is enabled \
and will never return to being Sy enabled .
.
.ds remount-upgrade \
.No Each filesystem will be upgraded automatically when remounted, \
or when a new file is created under that filesystem. \
The upgrade can also be triggered on filesystems via \
Nm zfs Cm set Sy version Ns = Ns Sy current Ar fs . \
No The upgrade process runs in the background and may take a while to complete \
for filesystems containing large amounts of files .
.
.de checksum-spiel
When the
.Sy \\$1
feature is set to
.Sy enabled ,
the administrator can turn on the
.Sy \\$1
checksum on any dataset using
.Nm zfs Cm set Sy checksum Ns = Ns Sy \\$1 Ar dset
.Po see Xr zfs-set 8 Pc .
This feature becomes
.Sy active
once a
.Sy checksum
property has been set to
.Sy \\$1 ,
and will return to being
.Sy enabled
once all filesystems that have ever had their checksum set to
.Sy \\$1
are destroyed.
..
.
.Sh FEATURES
The following features are supported on this system:
.Bl -tag -width Ds
.feature org.zfsonlinux allocation_classes yes
This feature enables support for separate allocation classes.
.Pp
This feature becomes
.Sy active
when a dedicated allocation class vdev
.Pq dedup or special
is created with the
.Nm zpool Cm create No or Nm zpool Cm add No commands .
With device removal, it can be returned to the
.Sy enabled
state if all the dedicated allocation class vdevs are removed.
.
.feature com.delphix async_destroy yes
Destroying a file system requires traversing all of its data in order to
return its used space to the pool.
Without
.Sy async_destroy ,
the file system is not fully removed until all space has been reclaimed.
If the destroy operation is interrupted by a reboot or power outage,
the next attempt to open the pool will need to complete the destroy
operation synchronously.
.Pp
When
.Sy async_destroy
is enabled, the file system's data will be reclaimed by a background process,
allowing the destroy operation to complete
without traversing the entire file system.
The background process is able to resume
interrupted destroys after the pool has been opened, eliminating the need
to finish interrupted destroys as part of the open operation.
The amount of space remaining to be reclaimed by the background process
is available through the
.Sy freeing
property.
.Pp
This feature is only
.Sy active
while
.Sy freeing
is non-zero.
.
.feature org.openzfs blake3 no extensible_dataset
This feature enables the use of the BLAKE3 hash algorithm for checksum and
dedup.
BLAKE3 is a secure hash algorithm focused on high performance.
.Pp
.checksum-spiel blake3
.
.feature com.fudosecurity block_cloning yes
When this feature is enabled, ZFS will use block cloning for operations like
.Xr copy_file_range 2 .
Block cloning makes it possible to create multiple references to a single
block.
It is much faster than copying the data (as the actual data is neither read nor
written) and takes no additional space.
-Blocks can be cloned across datasets under some conditions (like disabled
-encryption and equal
-.Nm recordsize ) .
+Blocks can be cloned across datasets under some conditions (such as an equal
+.Sy recordsize ,
+the same master encryption key, etc.).
+ZFS tries its best to clone across datasets, including encrypted ones,
+but this is limited for various (nontrivial) reasons depending on the OS
+and/or ZFS internals.
.Pp
This feature becomes
.Sy active
when the first block is cloned.
When the last cloned block is freed, it goes back to the
.Sy enabled
state.
.
.feature com.delphix bookmarks yes extensible_dataset
This feature enables use of the
.Nm zfs Cm bookmark
command.
.Pp
This feature is
.Sy active
while any bookmarks exist in the pool.
All bookmarks in the pool can be listed by running
.Nm zfs Cm list Fl t Sy bookmark Fl r Ar poolname .
.
.feature com.datto bookmark_v2 no bookmark extensible_dataset
This feature enables the creation and management of larger bookmarks which are
needed for other features in ZFS.
.Pp
This feature becomes
.Sy active
when a v2 bookmark is created and will be returned to the
.Sy enabled
state when all v2 bookmarks are destroyed.
.
.feature com.delphix bookmark_written no bookmark extensible_dataset bookmark_v2
This feature enables additional bookmark accounting fields, enabling the
.Sy written Ns # Ns Ar bookmark
property
.Pq space written since a bookmark
and estimates of send stream sizes for incrementals from bookmarks.
.Pp
This feature becomes
.Sy active
when a bookmark is created and will be
returned to the
.Sy enabled
state when all bookmarks with these fields are destroyed.
.
.feature org.openzfs device_rebuild yes
This feature enables the ability for the
.Nm zpool Cm attach
and
.Nm zpool Cm replace
commands to perform sequential reconstruction
.Pq instead of healing reconstruction
when resilvering.
.Pp
Sequential reconstruction resilvers a device in LBA order without immediately
verifying the checksums.
Once complete, a scrub is started, which then verifies the checksums.
This approach allows full redundancy to be restored to the pool
in the minimum amount of time.
This two-phase approach will take longer than a healing resilver
when the time to verify the checksums is included.
However, unless there is additional pool damage,
no checksum errors should be reported by the scrub.
This feature is incompatible with raidz configurations.
.Pp
This feature becomes
.Sy active
while a sequential resilver is in progress, and returns to
.Sy enabled
when the resilver completes.
.
.feature com.delphix device_removal no
This feature enables the
.Nm zpool Cm remove
command to remove top-level vdevs,
evacuating them to reduce the total size of the pool.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm remove
command is used
on a top-level vdev, and will never return to being
.Sy enabled .
.
.feature org.openzfs draid no
This feature enables use of the
.Sy draid
vdev type.
dRAID is a variant of RAID-Z which provides integrated distributed
hot spares that allow faster resilvering while retaining the benefits of RAID-Z.
Data, parity, and spare space are organized in redundancy groups
and distributed evenly over all of the devices.
.Pp
This feature becomes
.Sy active
when creating a pool which uses the
.Sy draid
vdev type, or when adding a new
.Sy draid
vdev to an existing pool.
.
.feature org.illumos edonr no extensible_dataset
This feature enables the use of the Edon-R hash algorithm for checksum,
including for nopwrite
.Po if compression is also enabled, an overwrite of
a block whose checksum matches the data being written will be ignored
.Pc .
In an abundance of caution, Edon-R requires verification when used with
dedup:
.Nm zfs Cm set Sy dedup Ns = Ns Sy edonr , Ns Sy verify
.Po see Xr zfs-set 8 Pc .
.Pp
Edon-R is a very high-performance hash algorithm that was part
of the NIST SHA-3 competition.
It provides extremely high hash performance
.Pq over 350% faster than SHA-256 ,
but was not selected because of its unsuitability
as a general purpose secure hash algorithm.
This implementation utilizes the new salted checksumming functionality
in ZFS, which means that the checksum is pre-seeded with a secret
256-bit random key
.Pq stored on the pool
before being fed the data block to be checksummed.
Thus the produced checksums are unique to a given pool,
preventing hash collision attacks on systems with dedup.
.Pp
.checksum-spiel edonr
.
.feature com.delphix embedded_data no
This feature improves the performance and compression ratio of
highly-compressible blocks.
Blocks whose contents can compress to 112 bytes
or smaller can take advantage of this feature.
.Pp
When this feature is enabled, the contents of highly-compressible blocks are
stored in the block
.Dq pointer
itself
.Po a misnomer in this case, as it contains
the compressed data, rather than a pointer to its location on disk
.Pc .
Thus the space of the block
.Pq one sector, typically 512 B or 4 KiB
is saved, and no additional I/O is needed to read and write the data block.
.
\*[instant-never]
.
.feature com.delphix empty_bpobj yes
This feature increases the performance of creating and using a large
number of snapshots of a single filesystem or volume, and also reduces
the disk space required.
.Pp
When there are many snapshots, each snapshot uses many Block Pointer
Objects
.Pq bpobjs
to track blocks associated with that snapshot.
However, in common use cases, most of these bpobjs are empty.
This feature allows us to create each bpobj on-demand,
thus eliminating the empty bpobjs.
.Pp
This feature is
.Sy active
while there are any filesystems, volumes,
or snapshots which were created after enabling this feature.
.
.feature com.delphix enabled_txg yes
Once this feature is enabled, ZFS records the transaction group number
in which new features are enabled.
This has no user-visible impact, but other features may depend on this feature.
.Pp
This feature becomes
.Sy active
as soon as it is enabled and will never return to being
.Sy enabled .
.
.feature com.datto encryption no bookmark_v2 extensible_dataset
This feature enables the creation and management of natively encrypted datasets.
.Pp
This feature becomes
.Sy active
when an encrypted dataset is created and will be returned to the
.Sy enabled
state when all datasets that use this feature are destroyed.
.
.feature com.delphix extensible_dataset no
This feature allows more flexible use of internal ZFS data structures,
and exists for other features to depend on.
.Pp
This feature will be
.Sy active
when the first dependent feature uses it, and will be returned to the
.Sy enabled
state when all datasets that use this feature are destroyed.
.
.feature com.joyent filesystem_limits yes extensible_dataset
This feature enables filesystem and snapshot limits.
These limits can be used to control how many filesystems and/or snapshots
can be created at the point in the tree on which the limits are set.
.Pp
This feature is
.Sy active
once either of the limit properties has been set on a dataset
and will never return to being
.Sy enabled .
.
.feature com.delphix head_errlog no
This feature enables the upgraded version of errlog, which required an on-disk
error log format change.
Now the error log of each head dataset is stored separately in the zap object
and keyed by the head id.
With this feature enabled, every dataset affected by an error block is listed
in the output of
.Nm zpool Cm status .
For encrypted filesystems with unloaded keys, snapshots and clones cannot be
checked for errors, so those errors are not reported;
an "access denied" error is reported instead.
.Pp
\*[instant-never]
.
.feature com.delphix hole_birth no enabled_txg
This feature has/had bugs, the result of which is that, if you do a
.Nm zfs Cm send Fl i
.Pq or Fl R , No since it uses Fl i
from an affected dataset, the receiving party will not see any checksum
or other errors, but the resulting destination snapshot
will not match the source.
Its use by
.Nm zfs Cm send Fl i
has been disabled by default
.Po
see
.Sy send_holes_without_birth_time
in
.Xr zfs 4
.Pc .
.Pp
This feature improves performance of incremental sends
.Pq Nm zfs Cm send Fl i
and receives for objects with many holes.
The most common case of hole-filled objects is zvols.
.Pp
An incremental send stream from snapshot
.Sy A No to snapshot Sy B
contains information about every block that changed between
.Sy A No and Sy B .
Blocks which did not change between those snapshots can be
identified and omitted from the stream using a piece of metadata called
the
.Dq block birth time ,
but birth times are not recorded for holes
.Pq blocks filled only with zeroes .
Since holes created after
.Sy A No cannot be distinguished from holes created before Sy A ,
information about every hole in the entire filesystem or zvol
is included in the send stream.
.Pp
For workloads where holes are rare this is not a problem.
However, when incrementally replicating filesystems or zvols with many holes
.Pq for example a zvol formatted with another filesystem
a lot of time will be spent sending and receiving unnecessary information
about holes that already exist on the receiving side.
.Pp
Once the
.Sy hole_birth
feature has been enabled the block birth times
of all new holes will be recorded.
Incremental sends between snapshots created after this feature is enabled
will use this new metadata to avoid sending information about holes that
already exist on the receiving side.
.Pp
\*[instant-never]
.
.feature org.open-zfs large_blocks no extensible_dataset
This feature allows the record size on a dataset to be set larger than 128 KiB.
.Pp
This feature becomes
.Sy active
once a dataset contains a file with a block size larger than 128 KiB,
and will return to being
.Sy enabled
once all filesystems that have ever had their recordsize larger than 128 KiB
are destroyed.
.
.feature org.zfsonlinux large_dnode no extensible_dataset
This feature allows the size of dnodes in a dataset to be set larger than 512 B.
.Pp
This feature becomes
.Sy active
once a dataset contains an object with a dnode larger than 512 B,
which occurs as a result of setting the
.Sy dnodesize
dataset property to a value other than
.Sy legacy .
The feature will return to being
.Sy enabled
once all filesystems that have ever contained a dnode larger than 512 B
are destroyed.
Large dnodes allow more data to be stored in the bonus buffer,
thus potentially improving performance by avoiding the use of spill blocks.
.
.feature com.delphix livelist yes
This feature allows clones to be deleted faster than the traditional method
when a large number of random/sparse writes have been made to the clone.
All blocks allocated and freed after a clone is created are tracked by
the clone's livelist, which is referenced during the deletion of the clone.
The feature is activated when a clone is created and remains
.Sy active
until all clones have been destroyed.
.
.feature com.delphix log_spacemap yes com.delphix:spacemap_v2
This feature improves performance for heavily-fragmented pools,
especially for workloads heavy in random writes.
It does so by logging all the metaslab changes on a single spacemap every TXG
instead of scattering multiple writes to all the metaslab spacemaps.
.Pp
\*[instant-never]
.
.feature org.illumos lz4_compress no
.Sy lz4
is a high-performance real-time compression algorithm that
features significantly faster compression and decompression as well as a
higher compression ratio than the older
.Sy lzjb
compression.
Typically,
.Sy lz4
compression is approximately 50% faster on compressible data and 200% faster
on incompressible data than
.Sy lzjb .
It is also approximately 80% faster on decompression,
while giving approximately a 10% better compression ratio.
.Pp
When the
.Sy lz4_compress
feature is set to
.Sy enabled ,
the administrator can turn on
.Sy lz4
compression on any dataset on the pool using the
.Xr zfs-set 8
command.
All newly written metadata will be compressed with the
.Sy lz4
algorithm.
.Pp
\*[instant-never]
.
.feature com.joyent multi_vdev_crash_dump no
This feature allows a dump device to be configured with a pool comprised
of multiple vdevs.
Those vdevs may be arranged in any mirrored or raidz configuration.
.Pp
When the
.Sy multi_vdev_crash_dump
feature is set to
.Sy enabled ,
the administrator can use
.Xr dumpadm 8
to configure a dump device on a pool comprised of multiple vdevs.
.Pp
Under
.Fx
and Linux this feature is unused, but registered for compatibility.
New pools created on these systems will have the feature
.Sy enabled
but will never transition to
.Sy active ,
as this functionality is not required for crash dump support.
Existing pools where this feature is
.Sy active
can be imported.
.
.feature com.delphix obsolete_counts yes device_removal
This feature is an enhancement of
.Sy device_removal ,
which will over time reduce the memory used to track removed devices.
When indirect blocks are freed or remapped,
we note that their part of the indirect mapping is
.Dq obsolete
– no longer needed.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm remove
command is used on a top-level vdev, and will never return to being
.Sy enabled .
.
.feature org.zfsonlinux project_quota yes extensible_dataset
This feature allows administrators to account space and object usage
against a project identifier
.Pq ID .
.Pp
The project ID is an object-based attribute.
When upgrading an existing filesystem,
objects without a project ID will be assigned a zero project ID.
When this feature is enabled, newly created objects inherit
their parent directories' project ID if the parent's inherit flag is set
.Pq via Nm chattr Sy [+-]P No or Nm zfs Cm project Fl s Ns | Ns Fl C .
Otherwise, the new object's project ID will be zero.
An object's project ID can be changed at any time by the owner
.Pq or privileged user
via
.Nm chattr Fl p Ar prjid
or
.Nm zfs Cm project Fl p Ar prjid .
.Pp
This feature will become
.Sy active
as soon as it is enabled and will never return to being
.Sy disabled .
\*[remount-upgrade]
.
.feature com.delphix redaction_bookmarks no bookmarks extensible_dataset
This feature enables the use of redacted
.Nm zfs Cm send Ns s ,
which create redaction bookmarks storing the list of blocks
redacted by the send that created them.
For more information about redacted sends, see
.Xr zfs-send 8 .
.
.feature com.delphix redacted_datasets no extensible_dataset
This feature enables the receiving of redacted
.Nm zfs Cm send
streams, which create redacted datasets when received.
These datasets are missing some of their blocks,
and so cannot be safely mounted, and their contents cannot be safely read.
For more information about redacted receives, see
.Xr zfs-send 8 .
.
.feature com.datto resilver_defer yes
This feature allows ZFS to postpone new resilvers if an existing one is already
in progress.
Without this feature, any new resilvers will cause the currently
running one to be immediately restarted from the beginning.
.Pp
This feature becomes
.Sy active
once a resilver has been deferred, and returns to being
.Sy enabled
when the deferred resilver begins.
.
.feature org.illumos sha512 no extensible_dataset
This feature enables the use of the SHA-512/256 truncated hash algorithm
.Pq FIPS 180-4
for checksum and dedup.
The native 64-bit arithmetic of SHA-512 provides an approximate 50%
performance boost over SHA-256 on 64-bit hardware
and is thus a good minimum-change replacement candidate
for systems where hash performance is important,
but these systems cannot for whatever reason utilize the faster
.Sy skein No and Sy edonr
algorithms.
.Pp
.checksum-spiel sha512
.
.feature org.illumos skein no extensible_dataset
This feature enables the use of the Skein hash algorithm for checksum and dedup.
Skein is a high-performance secure hash algorithm that was a
finalist in the NIST SHA-3 competition.
It provides a very high security margin and high performance on 64-bit hardware
.Pq 80% faster than SHA-256 .
This implementation also utilizes the new salted checksumming
functionality in ZFS, which means that the checksum is pre-seeded with a
secret 256-bit random key
.Pq stored on the pool
before being fed the data block to be checksummed.
Thus the produced checksums are unique to a given pool,
preventing hash collision attacks on systems with dedup.
.Pp
.checksum-spiel skein
.
.feature com.delphix spacemap_histogram yes
This feature allows ZFS to maintain more information about how free space
is organized within the pool.
If this feature is
.Sy enabled ,
it will be activated when a new space map object is created, or
an existing space map is upgraded to the new format,
and never returns back to being
.Sy enabled .
.
.feature com.delphix spacemap_v2 yes
This feature enables the use of the new space map encoding which
consists of two words
.Pq instead of one
whenever it is advantageous.
The new encoding allows space maps to represent large regions of
space more efficiently on-disk while also increasing their maximum
addressable offset.
.Pp
This feature becomes
.Sy active
once it is
.Sy enabled ,
and never returns back to being
.Sy enabled .
.
.feature org.zfsonlinux userobj_accounting yes extensible_dataset
This feature allows administrators to account object usage information
by user and group.
.Pp
\*[instant-never]
\*[remount-upgrade]
.
.feature com.klarasystems vdev_zaps_v2 no
This feature creates a ZAP object for the root vdev.
.Pp
This feature becomes
.Sy active
after the next
.Nm zpool Cm import
or
.Nm zpool Cm reguid .
.Pp
Properties can be retrieved or set on the root vdev using
.Nm zpool Cm get
and
.Nm zpool Cm set
with
.Sy root
as the vdev name, which is an alias for
.Sy root-0 .
.
.feature org.openzfs zilsaxattr yes extensible_dataset
This feature enables
.Sy xattr Ns = Ns Sy sa
extended attribute logging in the ZIL.
If enabled, extended attribute changes
.Pq both Sy xattrdir Ns = Ns Sy dir No and Sy xattr Ns = Ns Sy sa
are guaranteed to be durable if either the dataset had
.Sy sync Ns = Ns Sy always
set at the time the changes were made, or
.Xr sync 2
is called on the dataset after the changes were made.
.Pp
This feature becomes
.Sy active
when a ZIL is created for at least one dataset and will be returned to the
.Sy enabled
state when it is destroyed for all datasets that use this feature.
.
.feature com.delphix zpool_checkpoint yes
This feature enables the
.Nm zpool Cm checkpoint
command that can checkpoint the state of the pool
at the time it was issued and later rewind back to it or discard it.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm checkpoint
command is used to checkpoint the pool.
The feature will only return back to being
.Sy enabled
when the pool is rewound or the checkpoint has been discarded.
.
.feature org.freebsd zstd_compress no extensible_dataset
.Sy zstd
is a high-performance compression algorithm that features a
combination of high compression ratios and high speed.
Compared to
.Sy gzip ,
.Sy zstd
offers slightly better compression at much higher speeds.
Compared to
.Sy lz4 ,
.Sy zstd
offers much better compression while being only modestly slower.
Typically,
.Sy zstd
compression speed ranges from 250 to 500 MB/s per thread
and decompression speed is over 1 GB/s per thread.
.Pp
When the
.Sy zstd
feature is set to
.Sy enabled ,
the administrator can turn on
.Sy zstd
compression of any dataset using
.Nm zfs Cm set Sy compress Ns = Ns Sy zstd Ar dset
.Po see Xr zfs-set 8 Pc .
This feature becomes
.Sy active
once a
.Sy compress
property has been set to
.Sy zstd ,
and will return to being
.Sy enabled
once all filesystems that have ever had their
.Sy compress
property set to
.Sy zstd
are destroyed.
.El
.
.Sh SEE ALSO
.Xr zfs 8 ,
.Xr zpool 8
diff --git a/sys/contrib/openzfs/man/man8/zfs-list.8 b/sys/contrib/openzfs/man/man8/zfs-list.8
index 9f6a73ab956d..85bd3fbafced 100644
--- a/sys/contrib/openzfs/man/man8/zfs-list.8
+++ b/sys/contrib/openzfs/man/man8/zfs-list.8
@@ -1,183 +1,192 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd February 8, 2024
.Dt ZFS-LIST 8
.Os
.
.Sh NAME
.Nm zfs-list
.Nd list properties of ZFS datasets
.Sh SYNOPSIS
.Nm zfs
.Cm list
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns … Oc
.Oo Fl s Ar property Oc Ns …
.Oo Fl S Ar property Oc Ns …
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns … Oc
.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns …
.
.Sh DESCRIPTION
Lists property information for the given datasets, which may be specified by
absolute or relative pathname.
By default, all file systems and volumes are displayed.
Snapshots are displayed if the
.Sy listsnapshots
pool property is
.Sy on
.Po the default is
.Sy off
.Pc ,
or if the
.Fl t Sy snapshot
or
.Fl t Sy all
options are specified.
The following fields are displayed:
.Sy name , Sy used , Sy available , Sy referenced , Sy mountpoint .
.Bl -tag -width "-H"
.It Fl H
Used for scripting mode.
Do not print headers, and separate fields by a single tab instead of arbitrary
white space.
.It Fl d Ar depth
Recursively display any children of the dataset, limiting the recursion to
.Ar depth .
A
.Ar depth
of
.Sy 1
will display only the dataset and its direct children.
.It Fl o Ar property
A comma-separated list of properties to display.
The property must be:
.Bl -bullet -compact
.It
One of the properties described in the
.Sx Native Properties
section of
.Xr zfsprops 7
.It
A user property
.It
The value
.Sy name
to display the dataset name
.It
The value
.Sy space
to display space usage properties on file systems and volumes.
This is a shortcut for specifying
.Fl o Ns \ \& Ns Sy name , Ns Sy avail , Ns Sy used , Ns Sy usedsnap , Ns
.Sy usedds , Ns Sy usedrefreserv , Ns Sy usedchild
.Fl t Sy filesystem , Ns Sy volume .
.El
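.Pp
For example, the following two invocations produce the same output:
.Bd -literal -compact -offset Ds
# zfs list -o space
# zfs list -o name,avail,used,usedsnap,usedds,usedrefreserv,usedchild \e
    -t filesystem,volume
.Ed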
.It Fl p
Display numbers in parsable
.Pq exact
values.
.It Fl r
Recursively display any children of the dataset on the command line.
.It Fl s Ar property
A property for sorting the output by column in ascending order based on the
value of the property.
The property must be one of the properties described in the
.Sx Properties
section of
.Xr zfsprops 7
or the value
.Sy name
to sort by the dataset name.
Multiple properties can be specified at one time using multiple
.Fl s
property options.
Multiple
.Fl s
options are evaluated from left to right in decreasing order of importance.
The following is a list of sorting criteria:
.Bl -bullet -compact
.It
Numeric types sort in numeric order.
.It
String types sort in alphabetical order.
.It
Types inappropriate for a row sort that row to the literal bottom, regardless of
the specified ordering.
.El
.Pp
If no sorting options are specified the existing behavior of
.Nm zfs Cm list
is preserved.
.It Fl S Ar property
Same as
.Fl s ,
but sorts by property in descending order.
.It Fl t Ar type
A comma-separated list of types to display, where
.Ar type
is one of
.Sy filesystem ,
.Sy snapshot ,
.Sy volume ,
.Sy bookmark ,
or
.Sy all .
For example, specifying
.Fl t Sy snapshot
displays only snapshots.
+.Sy fs ,
+.Sy snap ,
+or
+.Sy vol
+can be used as aliases for
+.Sy filesystem ,
+.Sy snapshot ,
+or
+.Sy volume .
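+For example,
+.Nm zfs Cm list Fl t Sy snap
+is equivalent to
+.Nm zfs Cm list Fl t Sy snapshot .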
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 5 from zfs.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Listing ZFS Datasets
The following command lists all active file systems and volumes in the system.
Snapshots are displayed if
.Sy listsnaps Ns = Ns Sy on .
The default is
.Sy off .
See
.Xr zpoolprops 7
for more information on pool properties.
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm list
NAME USED AVAIL REFER MOUNTPOINT
pool 450K 457G 18K /pool
pool/home 315K 457G 21K /export/home
pool/home/anne 18K 457G 18K /export/home/anne
pool/home/bob 276K 457G 276K /export/home/bob
.Ed
.
.Sh SEE ALSO
.Xr zfsprops 7 ,
.Xr zfs-get 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-clear.8 b/sys/contrib/openzfs/man/man8/zpool-clear.8
index 7b9d40c74ebd..c61ecae483ac 100644
--- a/sys/contrib/openzfs/man/man8/zpool-clear.8
+++ b/sys/contrib/openzfs/man/man8/zpool-clear.8
@@ -1,59 +1,70 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 27, 2021
.Dt ZPOOL-CLEAR 8
.Os
.
.Sh NAME
.Nm zpool-clear
.Nd clear device errors in ZFS storage pool
.Sh SYNOPSIS
.Nm zpool
.Cm clear
+.Op Fl -power
.Ar pool
.Oo Ar device Oc Ns …
.
.Sh DESCRIPTION
Clears device errors in a pool.
If no arguments are specified, all device errors within the pool are cleared.
If one or more devices is specified, only those errors associated with the
specified device or devices are cleared.
.Pp
If the pool was suspended it will be brought back online provided the
devices can be accessed.
Pools with
.Sy multihost
enabled which have been suspended cannot be resumed.
While the pool was suspended, it may have been imported on
another host, and resuming I/O could result in pool damage.
+.Bl -tag -width Ds
+.It Fl -power
+Power on the device's slot in the storage enclosure and wait for the device
+to show up before attempting to clear errors.
+This is done on all the devices specified.
+Alternatively, you can set the
+.Sy ZPOOL_AUTO_POWER_ON_SLOT
+environment variable to always enable this behavior.
+Note: This flag currently works on Linux only.
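+.Pp
+For example, to power on the enclosure slot of a faulted disk
+.Pq here the hypothetical device Ar sdb
+before clearing its errors:
+.Bd -literal -compact -offset Ds
+# zpool clear --power tank sdb
+.Ed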
+.El
.
.Sh SEE ALSO
.Xr zdb 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-iostat.8 b/sys/contrib/openzfs/man/man8/zpool-iostat.8
index 34f7243d5aaa..e1d2a4b4ff1c 100644
--- a/sys/contrib/openzfs/man/man8/zpool-iostat.8
+++ b/sys/contrib/openzfs/man/man8/zpool-iostat.8
@@ -1,306 +1,306 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL-IOSTAT 8
.Os
.
.Sh NAME
.Nm zpool-iostat
.Nd display logical I/O statistics for ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm iostat
.Op Oo Oo Fl c Ar SCRIPT Oc Oo Fl lq Oc Oc Ns | Ns Fl rw
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl ghHLnpPvy
.Oo Ar pool Ns … Ns | Ns Oo Ar pool vdev Ns … Oc Ns | Ns Ar vdev Ns … Oc
.Op Ar interval Op Ar count
.
.Sh DESCRIPTION
Displays logical I/O statistics for the given pools/vdevs.
Physical I/O statistics may be observed via
.Xr iostat 1 .
If writes are located nearby, they may be merged into a single
larger operation.
Additional I/O may be generated depending on the level of vdev redundancy.
To filter output, you may pass in a list of pools, a pool and list of vdevs
in that pool, or a list of any vdevs from any pool.
If no items are specified, statistics for every pool in the system are shown.
When given an
.Ar interval ,
the statistics are printed every
.Ar interval
seconds until killed.
If the
.Fl n
flag is specified, the headers are displayed only once; otherwise, they are
displayed periodically.
If
.Ar count
is specified, the command exits after
.Ar count
reports are printed.
The first report printed is always the statistics since boot regardless of
whether
.Ar interval
and
.Ar count
are passed.
However, this behavior can be suppressed with the
.Fl y
flag.
Also note that the units of
.Sy K ,
.Sy M ,
.Sy G Ns …
that are printed in the report are in base 1024.
To get the raw values, use the
.Fl p
flag.
.Bl -tag -width Ds
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm iostat
output.
Users can run any script found in their
.Pa ~/.zpool.d
directory or from the system
.Pa /etc/zfs/zpool.d
directory.
Script names containing the slash
.Pq Sy /
character are not allowed.
The default search path can be overridden by setting the
.Sy ZPOOL_SCRIPTS_PATH
environment variable.
A privileged user can only run
.Fl c
if they have the
.Sy ZPOOL_SCRIPTS_AS_ROOT
environment variable set.
If a script requires the use of a privileged command, like
.Xr smartctl 8 ,
then it's recommended you allow the user access to it in
.Pa /etc/sudoers
or add the user to the
.Pa /etc/sudoers.d/zfs
file.
.Pp
If
.Fl c
is passed without a script name, it prints a list of all scripts.
.Fl c
also sets verbose mode
.No \&( Ns Fl v Ns No \&) .
.Pp
Script output should be in the form of "name=value".
The column name is set to "name" and the value is set to "value".
Multiple lines can be used to output multiple columns.
The first line of output not in the
"name=value" format is displayed without a column title,
and no more output after that is displayed.
This can be useful for printing error messages.
Blank or NULL values are printed as a '-' to make output AWKable.
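.Pp
For example, a minimal
.Pa ~/.zpool.d/size
script
.Pq hypothetical; it assumes Xr lsblk 8 is available
could report each vdev's size as an extra column:
.Bd -literal -compact -offset Ds
#!/bin/sh
# Emit one "name=value" line: a "size" column for this vdev.
echo "size=$(lsblk -ndo SIZE "$VDEV_UPATH")"
.Ed
.Pp
Running
.Nm zpool Cm iostat Fl c Ar size
would then include the new column.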
.Pp
The following environment variables are set before running each script:
.Bl -tag -compact -width "VDEV_ENC_SYSFS_PATH"
.It Sy VDEV_PATH
Full path to the vdev
.It Sy VDEV_UPATH
Underlying path to the vdev
.Pq Pa /dev/sd* .
For use with device mapper, multipath, or partitioned vdevs.
.It Sy VDEV_ENC_SYSFS_PATH
The sysfs path to the enclosure for the vdev (if any).
.El
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
-.Xr time 2 .
+.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl n
Print headers only once when passed.
.It Fl p
Display numbers in parsable (exact) values.
Time values are in nanoseconds.
.It Fl P
Display full paths for vdevs instead of only the last component of the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl r
Print request size histograms for the leaf vdev's I/O.
This includes histograms of individual I/O (ind) and aggregate I/O (agg).
These stats can be useful for observing how well I/O aggregation is working.
Note that TRIM I/O may exceed 16M, but will be counted as 16M.
.It Fl v
Verbose statistics.
Reports usage statistics for individual vdevs within the pool, in addition to
the pool-wide statistics.
.It Fl y
Normally the first line of output reports the statistics since boot:
suppress it.
.It Fl w
Display latency histograms:
.Bl -tag -compact -width "asyncq_read/write"
.It Sy total_wait
Total I/O time (queuing + disk I/O time).
.It Sy disk_wait
Disk I/O time (time reading/writing the disk).
.It Sy syncq_wait
Amount of time I/O spent in synchronous priority queues.
Does not include disk time.
.It Sy asyncq_wait
Amount of time I/O spent in asynchronous priority queues.
Does not include disk time.
.It Sy scrub
Amount of time I/O spent in scrub queue.
Does not include disk time.
.It Sy rebuild
Amount of time I/O spent in rebuild queue.
Does not include disk time.
.El
.It Fl l
Include average latency statistics:
.Bl -tag -compact -width "asyncq_read/write"
.It Sy total_wait
Average total I/O time (queuing + disk I/O time).
.It Sy disk_wait
Average disk I/O time (time reading/writing the disk).
.It Sy syncq_wait
Average amount of time I/O spent in synchronous priority queues.
Does not include disk time.
.It Sy asyncq_wait
Average amount of time I/O spent in asynchronous priority queues.
Does not include disk time.
.It Sy scrub
Average queuing time in scrub queue.
Does not include disk time.
.It Sy trim
Average queuing time in trim queue.
Does not include disk time.
.It Sy rebuild
Average queuing time in rebuild queue.
Does not include disk time.
.El
.It Fl q
Include active queue statistics.
Each priority queue has both pending
.Sy ( pend )
and active
.Sy ( activ )
I/O requests.
Pending requests are waiting to be issued to the disk,
and active requests have been issued to the disk and are waiting for completion.
These stats are broken out by priority queue:
.Bl -tag -compact -width "asyncq_read/write"
.It Sy syncq_read/write
Current number of entries in synchronous priority
queues.
.It Sy asyncq_read/write
Current number of entries in asynchronous priority queues.
.It Sy scrubq_read
Current number of entries in scrub queue.
.It Sy trimq_write
Current number of entries in trim queue.
.It Sy rebuildq_write
Current number of entries in rebuild queue.
.El
.Pp
All queue statistics are instantaneous measurements of the number of
entries in the queues.
If you specify an interval,
the measurements will be sampled from the end of the interval.
.El
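.Pp
For example, a hypothetical invocation combining per-vdev
.Pq Fl v ,
average latency
.Pq Fl l ,
and queue
.Pq Fl q
statistics, sampled every 5 seconds:
.Dl # Nm zpool Cm iostat Fl vlq Ar tank 5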
.
.Sh EXAMPLES
.\" These are, respectively, examples 13, 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 13 : No Adding Cache Devices to a ZFS Pool
The following command adds two disks for use as cache devices to a ZFS storage
pool:
.Dl # Nm zpool Cm add Ar pool Sy cache Pa sdc sdd
.Pp
Once added, the cache devices gradually fill with content from main memory.
Depending on the size of your cache devices, it could take over an hour for
them to fill.
Capacity and reads can be monitored using the
.Cm iostat
subcommand as follows:
.Dl # Nm zpool Cm iostat Fl v Ar pool 5
.
.Ss Example 16 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME STATE READ WRITE CKSUM vendor model size
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
U1 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U10 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U11 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U12 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U13 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U14 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
.No # Nm zpool Cm iostat Fl vc Pa size
capacity operations bandwidth
pool alloc free read write read write size
---------- ----- ----- ----- ----- ----- ----- ----
rpool 14.6G 54.9G 4 55 250K 2.69M
sda1 14.6G 54.9G 4 55 250K 2.69M 70G
---------- ----- ----- ----- ----- ----- ----- ----
.Ed
.
.Sh SEE ALSO
.Xr iostat 1 ,
.Xr smartctl 8 ,
.Xr zpool-list 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-list.8 b/sys/contrib/openzfs/man/man8/zpool-list.8
index 9e905d52dddc..c60c47f5eb3d 100644
--- a/sys/contrib/openzfs/man/man8/zpool-list.8
+++ b/sys/contrib/openzfs/man/man8/zpool-list.8
@@ -1,146 +1,146 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL-LIST 8
.Os
.
.Sh NAME
.Nm zpool-list
.Nd list information about ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm list
.Op Fl HgLpPv
.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns …
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns …
.Op Ar interval Op Ar count
.
.Sh DESCRIPTION
Lists the given pools along with a health status and space usage.
If no
.Ar pool Ns s
are specified, all pools in the system are listed.
When given an
.Ar interval ,
the information is printed every
.Ar interval
seconds until killed.
If
.Ar count
is specified, the command exits after
.Ar count
reports are printed.
.Bl -tag -width Ds
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
space.
.It Fl o Ar property
Comma-separated list of properties to display.
See the
.Xr zpoolprops 7
manual page for a list of valid properties.
The default list is
.Sy name , size , allocated , free , checkpoint , expandsize , fragmentation ,
.Sy capacity , dedupratio , health , altroot .
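.Pp
For example, to list only the name, size, and health of a hypothetical pool:
.Dl # Nm zpool Cm list Fl o Sy name , Ns Sy size , Ns Sy health Ar tank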
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk
path used to open it.
.It Fl p
Display numbers in parsable
.Pq exact
values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
-.Xr time 2 .
+.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl v
Verbose statistics.
Reports usage statistics for individual vdevs within the pool, in addition to
the pool-wide statistics.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 6, 15 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Listing Available ZFS Storage Pools
The following command lists all available pools on the system.
In this case, the pool
.Ar zion
is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
rpool 19.9G 8.43G 11.4G - 33% 42% 1.00x ONLINE -
tank 61.5G 20.0G 41.5G - 48% 32% 1.00x ONLINE -
zion - - - - - - - FAULTED -
.Ed
.
.Ss Example 2 : No Displaying expanded space on a device
The following command displays the detailed information for the pool
.Ar data .
This pool is composed of a single raidz vdev where one of its devices
increased its capacity by 10 GiB.
In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list Fl v Ar data
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
data 23.9G 14.6G 9.30G - 48% 61% 1.00x ONLINE -
raidz1 23.9G 14.6G 9.30G - 48%
sda - - - - -
sdb - - - 10G -
sdc - - - - -
.Ed
.
.Sh SEE ALSO
.Xr zpool-import 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-offline.8 b/sys/contrib/openzfs/man/man8/zpool-offline.8
index edcf1d06ab67..1b6095d63709 100644
--- a/sys/contrib/openzfs/man/man8/zpool-offline.8
+++ b/sys/contrib/openzfs/man/man8/zpool-offline.8
@@ -1,94 +1,106 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd August 9, 2019
.Dt ZPOOL-OFFLINE 8
.Os
.
.Sh NAME
.Nm zpool-offline
.Nd take physical devices offline in a ZFS storage pool
.Sh SYNOPSIS
.Nm zpool
.Cm offline
-.Op Fl ft
+.Op Fl Sy -power Ns | Ns Op Fl Sy ft
.Ar pool
.Ar device Ns …
.Nm zpool
.Cm online
-.Op Fl e
+.Op Fl Sy -power
+.Op Fl Sy e
.Ar pool
.Ar device Ns …
.
.Sh DESCRIPTION
.Bl -tag -width Ds
.It Xo
.Nm zpool
.Cm offline
-.Op Fl ft
+.Op Fl Sy -power Ns | Ns Op Fl Sy ft
.Ar pool
.Ar device Ns …
.Xc
Takes the specified physical device offline.
While the
.Ar device
is offline, no attempt is made to read or write to the device.
This command is not applicable to spares.
.Bl -tag -width Ds
+.It Fl -power
+Power off the device's slot in the storage enclosure.
+This flag currently works on Linux only.
.It Fl f
Force fault.
Instead of offlining the disk, put it into a faulted state.
The fault will persist across imports unless the
.Fl t
flag was specified.
.It Fl t
Temporary.
Upon reboot, the specified physical device reverts to its previous state.
.El
.It Xo
.Nm zpool
.Cm online
+.Op Fl -power
.Op Fl e
.Ar pool
.Ar device Ns …
.Xc
Brings the specified physical device online.
This command is not applicable to spares.
.Bl -tag -width Ds
+.It Fl -power
+Power on the device's slot in the storage enclosure and wait for the device
+to show up before attempting to online it.
+Alternatively, you can set the
+.Sy ZPOOL_AUTO_POWER_ON_SLOT
+environment variable to always enable this behavior.
+This flag currently works on Linux only.
.It Fl e
Expand the device to use all available space.
If the device is part of a mirror or raidz then all devices must be expanded
before the new space will become available to the pool.
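.Pp
For example, to bring a hypothetical replaced device back online and expand it
to its full size:
.Dl # Nm zpool Cm online Fl e Ar tank Pa sdb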
.El
.El
.
.Sh SEE ALSO
.Xr zpool-detach 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-resilver 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-status.8 b/sys/contrib/openzfs/man/man8/zpool-status.8
index 8f9580cf086e..24ad6e643cae 100644
--- a/sys/contrib/openzfs/man/man8/zpool-status.8
+++ b/sys/contrib/openzfs/man/man8/zpool-status.8
@@ -1,165 +1,169 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL-STATUS 8
.Os
.
.Sh NAME
.Nm zpool-status
.Nd show detailed health status for ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm status
-.Op Fl DigLpPstvx
+.Op Fl DeigLpPstvx
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
.Oo Ar pool Oc Ns …
.Op Ar interval Op Ar count
.
.Sh DESCRIPTION
Displays the detailed health status for the given pools.
If no
.Ar pool
is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section of
.Xr zpoolconcepts 7 .
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.
Both of these are only approximate, because the amount of data in the pool and
the other workloads on the system can change.
.Bl -tag -width Ds
+.It Fl -power
+Display vdev enclosure slot power status (on or off).
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm status
output.
See the
.Fl c
option of
.Nm zpool Cm iostat
for complete details.
+.It Fl e
+Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl i
Display vdev initialization status.
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, just that
they took an unreasonably long amount of time.
This may indicate a problem with the underlying storage.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
-.Xr time 2 .
+.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
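.Pp
For example, to report only on pools that are degraded or otherwise unhealthy:
.Dl # Nm zpool Cm status Fl x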
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME STATE READ WRITE CKSUM vendor model size
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
U1 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U10 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U11 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U12 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U13 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U14 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
.No # Nm zpool Cm iostat Fl vc Pa size
capacity operations bandwidth
pool alloc free read write read write size
---------- ----- ----- ----- ----- ----- ----- ----
rpool 14.6G 54.9G 4 55 250K 2.69M
sda1 14.6G 54.9G 4 55 250K 2.69M 70G
---------- ----- ----- ----- ----- ----- ----- ----
.Ed
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-wait.8 b/sys/contrib/openzfs/man/man8/zpool-wait.8
index 683b0141425c..4fa4cb235642 100644
--- a/sys/contrib/openzfs/man/man8/zpool-wait.8
+++ b/sys/contrib/openzfs/man/man8/zpool-wait.8
@@ -1,116 +1,116 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 27, 2021
.Dt ZPOOL-WAIT 8
.Os
.
.Sh NAME
.Nm zpool-wait
.Nd wait for activity to stop in a ZFS storage pool
.Sh SYNOPSIS
.Nm zpool
.Cm wait
.Op Fl Hp
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl t Ar activity Ns Oo , Ns Ar activity Ns Oc Ns …
.Ar pool
.Op Ar interval
.
.Sh DESCRIPTION
Waits until all background activity of the given types has ceased in the given
pool.
The activity could cease because it has completed, or because it has been
paused or canceled by a user, or because the pool has been exported or
destroyed.
If no activities are specified, the command waits until background activity of
every type listed below has ceased.
If there is no activity of the given types in progress, the command returns
immediately.
.Pp
These are the possible values for
.Ar activity ,
along with what each one waits for:
.Bl -tag -compact -offset Ds -width "initialize"
.It Sy discard
Checkpoint to be discarded
.It Sy free
.Sy freeing
property to become
.Sy 0
.It Sy initialize
All initializations to cease
.It Sy replace
All device replacements to cease
.It Sy remove
Device removal to cease
.It Sy resilver
Resilver to cease
.It Sy scrub
Scrub to cease
.It Sy trim
Manual trim to cease
.El
.Pp
If an
.Ar interval
is provided, the amount of work remaining, in bytes, for each activity is
printed every
.Ar interval
seconds.
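.Pp
For example, to block until a scrub of a hypothetical pool completes, printing
the remaining work every 10 seconds:
.Dl # Nm zpool Cm wait Fl t Sy scrub Ar tank 10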
.Bl -tag -width Ds
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
space.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
-.Xr time 2 .
+.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.El
.
.Sh SEE ALSO
.Xr zpool-checkpoint 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-status 8 ,
.Xr zpool-trim 8
diff --git a/sys/contrib/openzfs/man/man8/zpool.8 b/sys/contrib/openzfs/man/man8/zpool.8
index 4c4020bdd810..fe44e15cabe1 100644
--- a/sys/contrib/openzfs/man/man8/zpool.8
+++ b/sys/contrib/openzfs/man/man8/zpool.8
@@ -1,596 +1,613 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL 8
.Os
.
.Sh NAME
.Nm zpool
.Nd configure ZFS storage pools
.Sh SYNOPSIS
.Nm
.Fl ?V
.Nm
.Cm version
.Nm
.Cm subcommand
.Op Ar arguments
.
.Sh DESCRIPTION
The
.Nm
command configures ZFS storage pools.
A storage pool is a collection of devices that provides physical storage and
data replication for ZFS datasets.
All datasets within a storage pool share the same space.
See
.Xr zfs 8
for information on managing datasets.
.Pp
For an overview of creating and managing ZFS storage pools see the
.Xr zpoolconcepts 7
manual page.
.
.Sh SUBCOMMANDS
All subcommands that modify state are logged persistently to the pool in their
original form.
.Pp
The
.Nm
command provides subcommands to create and destroy storage pools, add capacity
to storage pools, and provide information about the storage pools.
The following subcommands are supported:
.Bl -tag -width Ds
.It Xo
.Nm
.Fl ?\&
.Xc
Displays a help message.
.It Xo
.Nm
.Fl V , -version
.Xc
.It Xo
.Nm
.Cm version
.Xc
Displays the software version of the
.Nm
userland utility and the ZFS kernel module.
.El
.
.Ss Creation
.Bl -tag -width Ds
.It Xr zpool-create 8
Creates a new storage pool containing the virtual devices specified on the
command line.
.It Xr zpool-initialize 8
Begins initializing by writing to all unallocated regions on the specified
devices, or all eligible devices in the pool if no individual devices are
specified.
.El
.
.Ss Destruction
.Bl -tag -width Ds
.It Xr zpool-destroy 8
Destroys the given pool, freeing up any devices for other use.
.It Xr zpool-labelclear 8
Removes ZFS label information from the specified
.Ar device .
.El
.
.Ss Virtual Devices
.Bl -tag -width Ds
.It Xo
.Xr zpool-attach 8 Ns / Ns Xr zpool-detach 8
.Xc
Converts a non-redundant disk into a mirror, or increases
the redundancy level of an existing mirror
.Cm ( attach Ns ), or performs the inverse operation (
.Cm detach Ns ).
.It Xo
.Xr zpool-add 8 Ns / Ns Xr zpool-remove 8
.Xc
Adds the specified virtual devices to the given pool,
or removes the specified device from the pool.
.It Xr zpool-replace 8
Replaces an existing device (which may be faulted) with a new one.
.It Xr zpool-split 8
Creates a new pool by splitting all mirrors in an existing pool (which decreases
its redundancy).
.El
.
.Ss Properties
Available pool properties are listed in the
.Xr zpoolprops 7
manual page.
.Bl -tag -width Ds
.It Xr zpool-list 8
Lists the given pools along with a health status and space usage.
.It Xo
.Xr zpool-get 8 Ns / Ns Xr zpool-set 8
.Xc
Retrieves the given list of properties
.Po
or all properties if
.Sy all
is used
.Pc
for the specified storage pool(s).
.El
.
.Ss Monitoring
.Bl -tag -width Ds
.It Xr zpool-status 8
Displays the detailed health status for the given pools.
.It Xr zpool-iostat 8
Displays logical I/O statistics for the given pools/vdevs.
Physical I/O operations may be observed via
.Xr iostat 1 .
.It Xr zpool-events 8
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
That manual page also describes the subclasses and event payloads
that can be generated.
.It Xr zpool-history 8
Displays the command history of the specified pool(s) or all pools if no pool is
specified.
.El
.
.Ss Maintenance
.Bl -tag -width Ds
.It Xr zpool-scrub 8
Begins a scrub or resumes a paused scrub.
.It Xr zpool-checkpoint 8
Checkpoints the current state of
.Ar pool ,
which can be later restored by
.Nm zpool Cm import Fl -rewind-to-checkpoint .
.It Xr zpool-trim 8
Initiates an immediate on-demand TRIM operation for all of the free space in a
pool.
This operation informs the underlying storage devices of all blocks
in the pool which are no longer allocated and allows thinly provisioned
devices to reclaim the space.
.It Xr zpool-sync 8
This command forces all in-core dirty data to be written to the primary
pool storage and not the ZIL.
It will also update administrative information including quota reporting.
Without arguments,
.Nm zpool Cm sync
will sync all pools on the system.
Otherwise, it will sync only the specified pool(s).
.It Xr zpool-upgrade 8
Manage the on-disk format version of storage pools.
.It Xr zpool-wait 8
Waits until all background activity of the given types has ceased in the given
pool.
.El
.
.Ss Fault Resolution
.Bl -tag -width Ds
.It Xo
.Xr zpool-offline 8 Ns / Ns Xr zpool-online 8
.Xc
Takes the specified physical device offline or brings it online.
.It Xr zpool-resilver 8
Starts a resilver.
If an existing resilver is already running, it will be restarted from the
beginning.
.It Xr zpool-reopen 8
Reopen all the vdevs associated with the pool.
.It Xr zpool-clear 8
Clears device errors in a pool.
.El
.
.Ss Import & Export
.Bl -tag -width Ds
.It Xr zpool-import 8
Make disks containing ZFS storage pools available for use on the system.
.It Xr zpool-export 8
Exports the given pools from the system.
.It Xr zpool-reguid 8
Generates a new unique identifier for the pool.
.El
.
.Sh EXIT STATUS
The following exit values are returned:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 0
Successful completion.
.It Sy 1
An error occurred.
.It Sy 2
Invalid command line options were specified.
.El
.
.Sh EXAMPLES
.\" Examples 1, 2, 3, 4, 12, 13 are shared with zpool-create.8.
.\" Examples 6, 14 are shared with zpool-add.8.
.\" Examples 7, 16 are shared with zpool-list.8.
.\" Examples 8 are shared with zpool-destroy.8.
.\" Examples 9 are shared with zpool-export.8.
.\" Examples 10 are shared with zpool-import.8.
.\" Examples 11 are shared with zpool-upgrade.8.
.\" Examples 15 are shared with zpool-remove.8.
.\" Examples 17 are shared with zpool-status.8.
.\" Examples 14, 17 are also shared with zpool-iostat.8.
.\" Make sure to update them omnidirectionally
.Ss Example 1 : No Creating a RAID-Z Storage Pool
The following command creates a pool with a single raidz root vdev that
consists of six disks:
.Dl # Nm zpool Cm create Ar tank Sy raidz Pa sda sdb sdc sdd sde sdf
.
.Ss Example 2 : No Creating a Mirrored Storage Pool
The following command creates a pool with two mirrors, where each mirror
contains two disks:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy mirror Pa sdc sdd
.
.Ss Example 3 : No Creating a ZFS Storage Pool by Using Partitions
The following command creates a non-redundant pool using two disk partitions:
.Dl # Nm zpool Cm create Ar tank Pa sda1 sdb2
.
.Ss Example 4 : No Creating a ZFS Storage Pool by Using Files
The following command creates a non-redundant pool using files.
While not recommended, a pool based on files can be useful for experimental
purposes.
.Dl # Nm zpool Cm create Ar tank Pa /path/to/file/a /path/to/file/b
.
.Ss Example 5 : No Making a non-mirrored ZFS Storage Pool mirrored
The following command converts an existing single device
.Ar sda
into a mirror by attaching a second device to it,
.Ar sdb .
.Dl # Nm zpool Cm attach Ar tank Pa sda sdb
.
.Ss Example 6 : No Adding a Mirror to a ZFS Storage Pool
The following command adds two mirrored disks to the pool
.Ar tank ,
assuming the pool is already made up of two-way mirrors.
The additional space is immediately available to any datasets within the pool.
.Dl # Nm zpool Cm add Ar tank Sy mirror Pa sda sdb
.
.Ss Example 7 : No Listing Available ZFS Storage Pools
The following command lists all available pools on the system.
In this case, the pool
.Ar zion
is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
rpool 19.9G 8.43G 11.4G - 33% 42% 1.00x ONLINE -
tank 61.5G 20.0G 41.5G - 48% 32% 1.00x ONLINE -
zion - - - - - - - FAULTED -
.Ed
.
.Ss Example 8 : No Destroying a ZFS Storage Pool
The following command destroys the pool
.Ar tank
and any datasets contained within:
.Dl # Nm zpool Cm destroy Fl f Ar tank
.
.Ss Example 9 : No Exporting a ZFS Storage Pool
The following command exports the devices in pool
.Ar tank
so that they can be relocated or later imported:
.Dl # Nm zpool Cm export Ar tank
.
.Ss Example 10 : No Importing a ZFS Storage Pool
The following command displays available pools, and then imports the pool
.Ar tank
for use on the system.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm import
pool: tank
id: 15451357997522795478
state: ONLINE
action: The pool can be imported using its name or numeric identifier.
config:
tank ONLINE
mirror ONLINE
sda ONLINE
sdb ONLINE
.No # Nm zpool Cm import Ar tank
.Ed
.
.Ss Example 11 : No Upgrading All ZFS Storage Pools to the Current Version
The following command upgrades all ZFS Storage pools to the current version of
the software:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm upgrade Fl a
This system is currently running ZFS version 2.
.Ed
.
.Ss Example 12 : No Managing Hot Spares
The following command creates a new pool with an available hot spare:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy spare Pa sdc
.Pp
If one of the disks were to fail, the pool would be reduced to the degraded
state.
The failed device can be replaced using the following command:
.Dl # Nm zpool Cm replace Ar tank Pa sda sdd
.Pp
Once the data has been resilvered, the spare is automatically removed and is
made available for use should another device fail.
The hot spare can be permanently removed from the pool using the following
command:
.Dl # Nm zpool Cm remove Ar tank Pa sdc
.
.Ss Example 13 : No Creating a ZFS Pool with Mirrored Separate Intent Logs
The following command creates a ZFS storage pool consisting of two two-way
mirrors and mirrored log devices:
.Dl # Nm zpool Cm create Ar pool Sy mirror Pa sda sdb Sy mirror Pa sdc sdd Sy log mirror Pa sde sdf
.
.Ss Example 14 : No Adding Cache Devices to a ZFS Pool
The following command adds two disks for use as cache devices to a ZFS storage
pool:
.Dl # Nm zpool Cm add Ar pool Sy cache Pa sdc sdd
.Pp
Once added, the cache devices gradually fill with content from main memory.
Depending on the size of your cache devices, it could take over an hour for
them to fill.
Capacity and reads can be monitored using the
.Cm iostat
subcommand as follows:
.Dl # Nm zpool Cm iostat Fl v Ar pool 5
.
.Ss Example 15 : No Removing a Mirrored top-level (Log or Data) Device
The following commands remove the mirrored log device
.Sy mirror-2
and mirrored top-level data device
.Sy mirror-1 .
.Pp
Given this configuration:
.Bd -literal -compact -offset Ds
pool: tank
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
sda ONLINE 0 0 0
sdb ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
sdc ONLINE 0 0 0
sdd ONLINE 0 0 0
logs
mirror-2 ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
.Ed
.Pp
The command to remove the mirrored log
.Ar mirror-2 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-2
.Pp
The command to remove the mirrored data
.Ar mirror-1 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-1
.
.Ss Example 16 : No Displaying expanded space on a device
The following command displays the detailed information for the pool
.Ar data .
This pool is composed of a single raidz vdev where one of its devices
increased its capacity by 10 GiB.
In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list Fl v Ar data
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
data 23.9G 14.6G 9.30G - 48% 61% 1.00x ONLINE -
raidz1 23.9G 14.6G 9.30G - 48%
sda - - - - -
sdb - - - 10G -
sdc - - - - -
.Ed
.
.Ss Example 17 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME STATE READ WRITE CKSUM vendor model size
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
U1 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U10 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U11 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U12 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U13 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U14 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
.No # Nm zpool Cm iostat Fl vc Pa size
capacity operations bandwidth
pool alloc free read write read write size
---------- ----- ----- ----- ----- ----- ----- ----
rpool 14.6G 54.9G 4 55 250K 2.69M
sda1 14.6G 54.9G 4 55 250K 2.69M 70G
---------- ----- ----- ----- ----- ----- ----- ----
.Ed
.
.Sh ENVIRONMENT VARIABLES
-.Bl -tag -compact -width "ZPOOL_IMPORT_UDEV_TIMEOUT_MS"
+.Bl -tag -compact -width "ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE"
.It Sy ZFS_ABORT
Cause
.Nm
to dump core on exit for the purposes of running
.Sy ::findleaks .
.It Sy ZFS_COLOR
Use ANSI color in
.Nm zpool Cm status
and
.Nm zpool Cm iostat
output.
+.It Sy ZPOOL_AUTO_POWER_ON_SLOT
+Automatically attempt to turn on a drive's enclosure slot power when
+running the
+.Nm zpool Cm online
+or
+.Nm zpool Cm clear
+commands.
+This has the same effect as passing the
+.Fl -power
+option to those commands.
+.It Sy ZPOOL_POWER_ON_SLOT_TIMEOUT_MS
+The maximum time in milliseconds to wait for a slot power sysfs value
+to return the correct value after writing it.
+For example, after writing "on" to the sysfs enclosure slot power_control file,
+it can take some time for the enclosure to power on the slot and return
+"on" when you read back the 'power_control' value.
+Defaults to 30 seconds (30000ms) if not set.
.It Sy ZPOOL_IMPORT_PATH
The search path for devices or files to use with the pool.
This is a colon-separated list of directories in which
.Nm
looks for device nodes and files.
Similar to the
.Fl d
option in
.Nm zpool import .
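.Pp
For example, a hypothetical search path restricted to stable by-id names:
.Dl # ZPOOL_IMPORT_PATH=/dev/disk/by-id zpool import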
.It Sy ZPOOL_IMPORT_UDEV_TIMEOUT_MS
The maximum time in milliseconds that
.Nm zpool import
will wait for an expected device to be available.
.It Sy ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE
If set, suppress warning about non-native vdev ashift in
.Nm zpool Cm status .
The value is not used, only the presence or absence of the variable matters.
.It Sy ZPOOL_VDEV_NAME_GUID
Cause
.Nm
subcommands to output vdev guids by default.
This behavior is identical to the
.Nm zpool Cm status Fl g
command line option.
.It Sy ZPOOL_VDEV_NAME_FOLLOW_LINKS
Cause
.Nm
subcommands to follow links for vdev names by default.
This behavior is identical to the
.Nm zpool Cm status Fl L
command line option.
.It Sy ZPOOL_VDEV_NAME_PATH
Cause
.Nm
subcommands to output full vdev path names by default.
This behavior is identical to the
.Nm zpool Cm status Fl P
command line option.
.It Sy ZFS_VDEV_DEVID_OPT_OUT
Older OpenZFS implementations had issues when attempting to display pool
config vdev names if a
.Sy devid
NVP value is present in the pool's config.
.Pp
For example, a pool that originated on the illumos platform would have a
.Sy devid
value in the config and
.Nm zpool Cm status
would fail when listing the config.
This would also be true for future Linux-based pools.
.Pp
A pool can be stripped of any
.Sy devid
values on import or prevented from adding
them on
.Nm zpool Cm create
or
.Nm zpool Cm add
by setting
.Sy ZFS_VDEV_DEVID_OPT_OUT .
.Pp
.It Sy ZPOOL_SCRIPTS_AS_ROOT
Allow a privileged user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
Normally, only unprivileged users are allowed to run
.Fl c .
.It Sy ZPOOL_SCRIPTS_PATH
The search path for scripts when running
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
This is a colon-separated list of directories and overrides the default
.Pa ~/.zpool.d
and
.Pa /etc/zfs/zpool.d
search paths.
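.Pp
For example, with a hypothetical script directory and script name:
.Dl # ZPOOL_SCRIPTS_PATH=/usr/local/zpool.d zpool iostat -c myscript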
.It Sy ZPOOL_SCRIPTS_ENABLED
Allow a user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
If
.Sy ZPOOL_SCRIPTS_ENABLED
is not set, it is assumed that the user is allowed to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
.\" Shared with zfs.8
.It Sy ZFS_MODULE_TIMEOUT
Time, in seconds, to wait for
.Pa /dev/zfs
to appear.
Defaults to
.Sy 10 ,
max
.Sy 600 Pq 10 minutes .
If
.Pf < Sy 0 ,
wait forever; if
.Sy 0 ,
don't wait.
.El
.
.Sh INTERFACE STABILITY
.Sy Evolving
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zpool-features 7 ,
.Xr zpoolconcepts 7 ,
.Xr zpoolprops 7 ,
.Xr zed 8 ,
.Xr zfs 8 ,
.Xr zpool-add 8 ,
.Xr zpool-attach 8 ,
.Xr zpool-checkpoint 8 ,
.Xr zpool-clear 8 ,
.Xr zpool-create 8 ,
.Xr zpool-destroy 8 ,
.Xr zpool-detach 8 ,
.Xr zpool-events 8 ,
.Xr zpool-export 8 ,
.Xr zpool-get 8 ,
.Xr zpool-history 8 ,
.Xr zpool-import 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-labelclear 8 ,
.Xr zpool-list 8 ,
.Xr zpool-offline 8 ,
.Xr zpool-online 8 ,
.Xr zpool-reguid 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-set 8 ,
.Xr zpool-split 8 ,
.Xr zpool-status 8 ,
.Xr zpool-sync 8 ,
.Xr zpool-trim 8 ,
.Xr zpool-upgrade 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index b9c284a24418..f1a145779ddc 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -1,502 +1,503 @@
# When integrated in to a monolithic kernel the spl module must appear
# first. This ensures its module initialization function is run before
# any of the other module initialization functions which depend on it.
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
icp_include = @abs_srcdir@/icp/include
zstd_include = @abs_srcdir@/zstd/include
ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include
src = @abs_srcdir@
obj = @abs_builddir@
else
zfs_include = $(srctree)/include/zfs
icp_include = $(srctree)/$(src)/icp/include
zstd_include = $(srctree)/$(src)/zstd/include
ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h
endif
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs
ZFS_MODULE_CFLAGS += -I$(zfs_include)
ZFS_MODULE_CPPFLAGS += -D_KERNEL
ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@
# KASAN enables -Werror=frame-larger-than=1024, which
# breaks oh so many parts of our build.
ifeq ($(CONFIG_KASAN),y)
ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than=
endif
# Generated binary search code is particularly bad with this optimization.
# Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c
# is not affected when unrolling is done.
# Disable it until the following upstream issue is resolved:
# https://github.com/llvm/llvm-project/issues/62790
ifeq ($(CONFIG_X86),y)
ifeq ($(CONFIG_CC_IS_CLANG),y)
CFLAGS_zfs/dsl_scan.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/metaslab.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/range_tree.o += -mllvm -x86-cmov-converter=false
CFLAGS_zfs/zap_micro.o += -mllvm -x86-cmov-converter=false
endif
endif
ifneq ($(KBUILD_EXTMOD),)
@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include
@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@
endif
asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ifeq ($(CONFIG_ARM64),y)
CFLAGS_REMOVE_zcommon/zfs_fletcher_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neonx2.o += -mgeneral-regs-only
endif
# Suppress unused-value warnings in sparc64 architecture headers
ccflags-$(CONFIG_SPARC64) += -Wno-unused-value
obj-$(CONFIG_ZFS) := spl.o zfs.o
SPL_OBJS := \
spl-atomic.o \
spl-condvar.o \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \
spl-proc.o \
spl-procfs-list.o \
+ spl-shrinker.o \
spl-taskq.o \
spl-thread.o \
spl-trace.o \
spl-tsd.o \
spl-vmem.o \
spl-xdr.o \
spl-zlib.o \
spl-zone.o
spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS))
zfs-objs += avl/avl.o
ICP_OBJS := \
algs/aes/aes_impl.o \
algs/aes/aes_impl_generic.o \
algs/aes/aes_modes.o \
algs/blake3/blake3.o \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/edonr/edonr.o \
algs/modes/cbc.o \
algs/modes/ccm.o \
algs/modes/ctr.o \
algs/modes/ecb.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
algs/sha2/sha2_generic.o \
algs/sha2/sha256_impl.o \
algs/sha2/sha512_impl.o \
algs/skein/skein.o \
algs/skein/skein_block.o \
algs/skein/skein_iv.o \
api/kcf_cipher.o \
api/kcf_ctxops.o \
api/kcf_mac.o \
core/kcf_callprov.o \
core/kcf_mech_tabs.o \
core/kcf_prov_lib.o \
core/kcf_prov_tabs.o \
core/kcf_sched.o \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
io/skein_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
asm-x86_64/aes/aes_aesni.o \
asm-x86_64/aes/aes_amd64.o \
asm-x86_64/aes/aeskey.o \
asm-x86_64/blake3/blake3_avx2.o \
asm-x86_64/blake3/blake3_avx512.o \
asm-x86_64/blake3/blake3_sse2.o \
asm-x86_64/blake3/blake3_sse41.o \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
ICP_OBJS_X86 := \
algs/aes/aes_impl_aesni.o \
algs/aes/aes_impl_x86-64.o \
algs/modes/gcm_pclmulqdq.o
ICP_OBJS_ARM := \
asm-arm/sha2/sha256-armv7.o \
asm-arm/sha2/sha512-armv7.o
ICP_OBJS_ARM64 := \
asm-aarch64/blake3/b3_aarch64_sse2.o \
asm-aarch64/blake3/b3_aarch64_sse41.o \
asm-aarch64/sha2/sha256-armv8.o \
asm-aarch64/sha2/sha512-armv8.o
ICP_OBJS_PPC_PPC64 := \
asm-ppc64/blake3/b3_ppc64le_sse2.o \
asm-ppc64/blake3/b3_ppc64le_sse41.o \
asm-ppc64/sha2/sha256-p8.o \
asm-ppc64/sha2/sha512-p8.o \
asm-ppc64/sha2/sha256-ppc.o \
asm-ppc64/sha2/sha512-ppc.o
zfs-objs += $(addprefix icp/,$(ICP_OBJS))
zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64))
zfs-$(CONFIG_ARM) += $(addprefix icp/,$(ICP_OBJS_ARM))
zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
# Suppress objtool "return with modified stack frame" warnings.
OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y
# Suppress objtool "unsupported stack pointer realignment" warnings.
# See #6950 for the reasoning.
OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y
OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y
LUA_OBJS := \
lapi.o \
lauxlib.o \
lbaselib.o \
lcode.o \
lcompat.o \
lcorolib.o \
lctype.o \
ldebug.o \
ldo.o \
lfunc.o \
lgc.o \
llex.o \
lmem.o \
lobject.o \
lopcodes.o \
lparser.o \
lstate.o \
lstring.o \
lstrlib.o \
ltable.o \
ltablib.o \
ltm.o \
lvm.o \
lzio.o \
setjmp/setjmp.o
zfs-objs += $(addprefix lua/,$(LUA_OBJS))
NVPAIR_OBJS := \
fnvpair.o \
nvpair.o \
nvpair_alloc_fixed.o \
nvpair_alloc_spl.o
zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS))
UNICODE_OBJS := \
u8_textprep.o \
uconv.o
zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS))
ZCOMMON_OBJS := \
cityhash.o \
zfeature_common.o \
zfs_comutil.o \
zfs_deleg.o \
zfs_fletcher.o \
zfs_fletcher_superscalar.o \
zfs_fletcher_superscalar4.o \
zfs_namecheck.o \
zfs_prop.o \
zpool_prop.o \
zprop_common.o
ZCOMMON_OBJS_X86 := \
zfs_fletcher_avx512.o \
zfs_fletcher_intel.o \
zfs_fletcher_sse.o
ZCOMMON_OBJS_ARM64 := \
zfs_fletcher_aarch64_neon.o
zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS))
zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64))
# Zstd uses -O3 by default, so we should follow
ZFS_ZSTD_FLAGS := -O3
# -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h
# Set it for other compilers, too.
ZFS_ZSTD_FLAGS += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
ZFS_ZSTD_FLAGS += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib
ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480
ZSTD_OBJS := \
zfs_zstd.o \
zstd_sparc.o
ZSTD_UPSTREAM_OBJS := \
lib/common/entropy_common.o \
lib/common/error_private.o \
lib/common/fse_decompress.o \
lib/common/pool.o \
lib/common/zstd_common.o \
lib/compress/fse_compress.o \
lib/compress/hist.o \
lib/compress/huf_compress.o \
lib/compress/zstd_compress.o \
lib/compress/zstd_compress_literals.o \
lib/compress/zstd_compress_sequences.o \
lib/compress/zstd_compress_superblock.o \
lib/compress/zstd_double_fast.o \
lib/compress/zstd_fast.o \
lib/compress/zstd_lazy.o \
lib/compress/zstd_ldm.o \
lib/compress/zstd_opt.o \
lib/decompress/huf_decompress.o \
lib/decompress/zstd_ddict.o \
lib/decompress/zstd_decompress.o \
lib/decompress/zstd_decompress_block.o
zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS))
# Disable aarch64 neon SIMD instructions for kernel mode
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS)
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include)
$(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w
$(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h
ZFS_OBJS := \
abd.o \
aggsum.o \
arc.o \
blake3_zfs.o \
blkptr.o \
bplist.o \
bpobj.o \
bptree.o \
bqueue.o \
brt.o \
btree.o \
dataset_kstats.o \
dbuf.o \
dbuf_stats.o \
ddt.o \
ddt_zap.o \
dmu.o \
dmu_diff.o \
dmu_object.o \
dmu_objset.o \
dmu_recv.o \
dmu_redact.o \
dmu_send.o \
dmu_traverse.o \
dmu_tx.o \
dmu_zfetch.o \
dnode.o \
dnode_sync.o \
dsl_bookmark.o \
dsl_crypt.o \
dsl_dataset.o \
dsl_deadlist.o \
dsl_deleg.o \
dsl_destroy.o \
dsl_dir.o \
dsl_pool.o \
dsl_prop.o \
dsl_scan.o \
dsl_synctask.o \
dsl_userhold.o \
edonr_zfs.o \
fm.o \
gzip.o \
hkdf.o \
lz4.o \
lz4_zfs.o \
lzjb.o \
metaslab.o \
mmp.o \
multilist.o \
objlist.o \
pathname.o \
range_tree.o \
refcount.o \
rrwlock.o \
sa.o \
sha2_zfs.o \
skein_zfs.o \
spa.o \
spa_checkpoint.o \
spa_config.o \
spa_errlog.o \
spa_history.o \
spa_log_spacemap.o \
spa_misc.o \
spa_stats.o \
space_map.o \
space_reftree.o \
txg.o \
uberblock.o \
unique.o \
vdev.o \
vdev_draid.o \
vdev_draid_rand.o \
vdev_indirect.o \
vdev_indirect_births.o \
vdev_indirect_mapping.o \
vdev_initialize.o \
vdev_label.o \
vdev_mirror.o \
vdev_missing.o \
vdev_queue.o \
vdev_raidz.o \
vdev_raidz_math.o \
vdev_raidz_math_scalar.o \
vdev_rebuild.o \
vdev_removal.o \
vdev_root.o \
vdev_trim.o \
zap.o \
zap_leaf.o \
zap_micro.o \
zcp.o \
zcp_get.o \
zcp_global.o \
zcp_iter.o \
zcp_set.o \
zcp_synctask.o \
zfeature.o \
zfs_byteswap.o \
zfs_chksum.o \
zfs_fm.o \
zfs_fuid.o \
zfs_impl.o \
zfs_ioctl.o \
zfs_log.o \
zfs_onexit.o \
zfs_quota.o \
zfs_ratelimit.o \
zfs_replay.o \
zfs_rlock.o \
zfs_sa.o \
zfs_vnops.o \
zil.o \
zio.o \
zio_checksum.o \
zio_compress.o \
zio_inject.o \
zle.o \
zrlock.o \
zthr.o \
zvol.o
ZFS_OBJS_OS := \
abd_os.o \
arc_os.o \
mmp_os.o \
policy.o \
qat.o \
qat_compress.o \
qat_crypt.o \
spa_misc_os.o \
trace.o \
vdev_disk.o \
vdev_file.o \
zfs_acl.o \
zfs_ctldir.o \
zfs_debug.o \
zfs_dir.o \
zfs_file_os.o \
zfs_ioctl_os.o \
zfs_racct.o \
zfs_sysfs.o \
zfs_uio.o \
zfs_vfsops.o \
zfs_vnops_os.o \
zfs_znode.o \
zio_crypt.o \
zpl_ctldir.o \
zpl_export.o \
zpl_file.o \
zpl_file_range.o \
zpl_inode.o \
zpl_super.o \
zpl_xattr.o \
zvol_os.o
ZFS_OBJS_X86 := \
vdev_raidz_math_avx2.o \
vdev_raidz_math_avx512bw.o \
vdev_raidz_math_avx512f.o \
vdev_raidz_math_sse2.o \
vdev_raidz_math_ssse3.o
ZFS_OBJS_ARM64 := \
vdev_raidz_math_aarch64_neon.o \
vdev_raidz_math_aarch64_neonx2.o
ZFS_OBJS_PPC_PPC64 := \
vdev_raidz_math_powerpc_altivec.o
zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS))
zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
UBSAN_SANITIZE_zap_leaf.o := n
UBSAN_SANITIZE_zap_micro.o := n
UBSAN_SANITIZE_sa.o := n
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
ifeq ($(CONFIG_ALTIVEC),y)
$(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec
endif
diff --git a/sys/contrib/openzfs/module/lua/ldebug.c b/sys/contrib/openzfs/module/lua/ldebug.c
index 0092474c762d..23e321bb1247 100644
--- a/sys/contrib/openzfs/module/lua/ldebug.c
+++ b/sys/contrib/openzfs/module/lua/ldebug.c
@@ -1,606 +1,607 @@
/*
** $Id: ldebug.c,v 2.90.1.4 2015/02/19 17:05:13 roberto Exp $
** Debug Interface
** See Copyright Notice in lua.h
*/
#define ldebug_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "lapi.h"
#include "lcode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"
#define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_TCCL)
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name);
static int currentpc (CallInfo *ci) {
lua_assert(isLua(ci));
return pcRel(ci->u.l.savedpc, ci_func(ci)->p);
}
static int currentline (CallInfo *ci) {
return getfuncline(ci_func(ci)->p, currentpc(ci));
}
static void swapextra (lua_State *L) {
if (L->status == LUA_YIELD) {
CallInfo *ci = L->ci; /* get function that yielded */
StkId temp = ci->func; /* exchange its 'func' and 'extra' values */
ci->func = restorestack(L, ci->extra);
ci->extra = savestack(L, temp);
}
}
/*
** this function can be called asynchronous (e.g. during a signal)
*/
LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count) {
if (func == NULL || mask == 0) { /* turn off hooks? */
mask = 0;
func = NULL;
}
if (isLua(L->ci))
L->oldpc = L->ci->u.l.savedpc;
L->hook = func;
L->basehookcount = count;
resethookcount(L);
L->hookmask = cast_byte(mask);
return 1;
}
LUA_API lua_Hook lua_gethook (lua_State *L) {
return L->hook;
}
LUA_API int lua_gethookmask (lua_State *L) {
return L->hookmask;
}
LUA_API int lua_gethookcount (lua_State *L) {
return L->basehookcount;
}
LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) {
int status;
CallInfo *ci;
if (level < 0) return 0; /* invalid (negative) level */
lua_lock(L);
for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous)
level--;
if (level == 0 && ci != &L->base_ci) { /* level found? */
status = 1;
ar->i_ci = ci;
}
else status = 0; /* no such level */
lua_unlock(L);
return status;
}
static const char *upvalname (Proto *p, int uv) {
TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name);
if (s == NULL) return "?";
else return getstr(s);
}
static const char *findvararg (CallInfo *ci, int n, StkId *pos) {
int nparams = clLvalue(ci->func)->p->numparams;
- if (n >= ci->u.l.base - ci->func - nparams)
+ int nvararg = cast_int(ci->u.l.base - ci->func) - nparams;
+ if (n <= -nvararg)
return NULL; /* no such vararg */
else {
- *pos = ci->func + nparams + n;
+ *pos = ci->func + nparams - n;
return "(*vararg)"; /* generic name for any vararg */
}
}
static const char *findlocal (lua_State *L, CallInfo *ci, int n,
StkId *pos) {
const char *name = NULL;
StkId base;
if (isLua(ci)) {
if (n < 0) /* access to vararg values? */
- return findvararg(ci, -n, pos);
+ return findvararg(ci, n, pos);
else {
base = ci->u.l.base;
name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci));
}
}
else
base = ci->func + 1;
if (name == NULL) { /* no 'standard' name? */
StkId limit = (ci == L->ci) ? L->top : ci->next->func;
if (limit - base >= n && n > 0) /* is 'n' inside 'ci' stack? */
name = "(*temporary)"; /* generic name for any valid slot */
else
return NULL; /* no name */
}
*pos = base + (n - 1);
return name;
}
LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) {
const char *name;
lua_lock(L);
swapextra(L);
if (ar == NULL) { /* information about non-active function? */
if (!isLfunction(L->top - 1)) /* not a Lua function? */
name = NULL;
else /* consider live variables at function start (parameters) */
name = luaF_getlocalname(clLvalue(L->top - 1)->p, n, 0);
}
else { /* active function; get information through 'ar' */
StkId pos = 0; /* to avoid warnings */
name = findlocal(L, ar->i_ci, n, &pos);
if (name) {
setobj2s(L, L->top, pos);
api_incr_top(L);
}
}
swapextra(L);
lua_unlock(L);
return name;
}
LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) {
StkId pos = 0; /* to avoid warnings */
const char *name;
lua_lock(L);
swapextra(L);
name = findlocal(L, ar->i_ci, n, &pos);
if (name)
setobjs2s(L, pos, L->top - 1);
L->top--; /* pop value */
swapextra(L);
lua_unlock(L);
return name;
}
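/*
** Illustrative usage (sketch): from within a hook, the first active
** local of the running function can be read and replaced like so:
**
**   const char *nm = lua_getlocal(L, ar, 1);   pushes its current value
**   if (nm != NULL) {
**     lua_pushinteger(L, 42);
**     lua_setlocal(L, ar, 1);                  pops the new value
**     lua_pop(L, 1);                           drop the old value
**   }
*/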
static void funcinfo (lua_Debug *ar, Closure *cl) {
if (noLuaClosure(cl)) {
ar->source = "=[C]";
ar->linedefined = -1;
ar->lastlinedefined = -1;
ar->what = "C";
}
else {
Proto *p = cl->l.p;
ar->source = p->source ? getstr(p->source) : "=?";
ar->linedefined = p->linedefined;
ar->lastlinedefined = p->lastlinedefined;
ar->what = (ar->linedefined == 0) ? "main" : "Lua";
}
luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE);
}
static void collectvalidlines (lua_State *L, Closure *f) {
if (noLuaClosure(f)) {
setnilvalue(L->top);
api_incr_top(L);
}
else {
int i;
TValue v;
int *lineinfo = f->l.p->lineinfo;
Table *t = luaH_new(L); /* new table to store active lines */
sethvalue(L, L->top, t); /* push it on stack */
api_incr_top(L);
setbvalue(&v, 1); /* boolean 'true' to be the value of all indices */
for (i = 0; i < f->l.p->sizelineinfo; i++) /* for all lines with code */
luaH_setint(L, t, lineinfo[i], &v); /* table[line] = true */
}
}
static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar,
Closure *f, CallInfo *ci) {
int status = 1;
for (; *what; what++) {
switch (*what) {
case 'S': {
funcinfo(ar, f);
break;
}
case 'l': {
ar->currentline = (ci && isLua(ci)) ? currentline(ci) : -1;
break;
}
case 'u': {
ar->nups = (f == NULL) ? 0 : f->c.nupvalues;
if (noLuaClosure(f)) {
ar->isvararg = 1;
ar->nparams = 0;
}
else {
ar->isvararg = f->l.p->is_vararg;
ar->nparams = f->l.p->numparams;
}
break;
}
case 't': {
ar->istailcall = (ci) ? ci->callstatus & CIST_TAIL : 0;
break;
}
case 'n': {
/* calling function is a known Lua function? */
if (ci && !(ci->callstatus & CIST_TAIL) && isLua(ci->previous))
ar->namewhat = getfuncname(L, ci->previous, &ar->name);
else
ar->namewhat = NULL;
if (ar->namewhat == NULL) {
ar->namewhat = ""; /* not found */
ar->name = NULL;
}
break;
}
case 'L':
case 'f': /* handled by lua_getinfo */
break;
default: status = 0; /* invalid option */
}
}
return status;
}
LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) {
int status;
Closure *cl;
CallInfo *ci;
StkId func;
lua_lock(L);
swapextra(L);
if (*what == '>') {
ci = NULL;
func = L->top - 1;
api_check(L, ttisfunction(func), "function expected");
what++; /* skip the '>' */
L->top--; /* pop function */
}
else {
ci = ar->i_ci;
func = ci->func;
lua_assert(ttisfunction(ci->func));
}
cl = ttisclosure(func) ? clvalue(func) : NULL;
status = auxgetinfo(L, what, ar, cl, ci);
if (strchr(what, 'f')) {
setobjs2s(L, L->top, func);
api_incr_top(L);
}
swapextra(L);
if (strchr(what, 'L'))
collectvalidlines(L, cl);
lua_unlock(L);
return status;
}
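/*
** Illustrative usage (sketch): with the '>' prefix, lua_getinfo pops a
** function from the stack instead of using an activation record:
**
**   lua_Debug ar;
**   lua_getglobal(L, "f");        push the function to inspect
**   lua_getinfo(L, ">S", &ar);    pops it and fills the source fields
**   printf("defined at %s:%d\n", ar.short_src, ar.linedefined);
*/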
/*
** {======================================================
** Symbolic Execution
** =======================================================
*/
static const char *getobjname (Proto *p, int lastpc, int reg,
const char **name);
/*
** find a "name" for the RK value 'c'
*/
static void kname (Proto *p, int pc, int c, const char **name) {
if (ISK(c)) { /* is 'c' a constant? */
TValue *kvalue = &p->k[INDEXK(c)];
if (ttisstring(kvalue)) { /* literal constant? */
*name = svalue(kvalue); /* it is its own name */
return;
}
/* else no reasonable name found */
}
else { /* 'c' is a register */
const char *what = getobjname(p, pc, c, name); /* search for 'c' */
if (what && *what == 'c') { /* found a constant name? */
return; /* 'name' already filled */
}
/* else no reasonable name found */
}
*name = "?"; /* no reasonable name found */
}
static int filterpc (int pc, int jmptarget) {
if (pc < jmptarget) /* is code conditional (inside a jump)? */
return -1; /* cannot know who sets that register */
else return pc; /* current position sets that register */
}
/*
** try to find last instruction before 'lastpc' that modified register 'reg'
*/
static int findsetreg (Proto *p, int lastpc, int reg) {
int pc;
int setreg = -1; /* keep last instruction that changed 'reg' */
int jmptarget = 0; /* any code before this address is conditional */
for (pc = 0; pc < lastpc; pc++) {
Instruction i = p->code[pc];
OpCode op = GET_OPCODE(i);
int a = GETARG_A(i);
switch (op) {
case OP_LOADNIL: {
int b = GETARG_B(i);
if (a <= reg && reg <= a + b) /* set registers from 'a' to 'a+b' */
setreg = filterpc(pc, jmptarget);
break;
}
case OP_TFORCALL: {
if (reg >= a + 2) /* affects all regs above its base */
setreg = filterpc(pc, jmptarget);
break;
}
case OP_CALL:
case OP_TAILCALL: {
if (reg >= a) /* affects all registers above base */
setreg = filterpc(pc, jmptarget);
break;
}
case OP_JMP: {
int b = GETARG_sBx(i);
int dest = pc + 1 + b;
/* jump is forward and does not skip `lastpc'? */
if (pc < dest && dest <= lastpc) {
if (dest > jmptarget)
jmptarget = dest; /* update 'jmptarget' */
}
break;
}
case OP_TEST: {
if (reg == a) /* jumped code can change 'a' */
setreg = filterpc(pc, jmptarget);
break;
}
default:
if (testAMode(op) && reg == a) /* any instruction that sets A */
setreg = filterpc(pc, jmptarget);
break;
}
}
return setreg;
}
static const char *getobjname (Proto *p, int lastpc, int reg,
const char **name) {
int pc;
*name = luaF_getlocalname(p, reg + 1, lastpc);
if (*name) /* is a local? */
return "local";
/* else try symbolic execution */
pc = findsetreg(p, lastpc, reg);
if (pc != -1) { /* could find instruction? */
Instruction i = p->code[pc];
OpCode op = GET_OPCODE(i);
switch (op) {
case OP_MOVE: {
int b = GETARG_B(i); /* move from 'b' to 'a' */
if (b < GETARG_A(i))
return getobjname(p, pc, b, name); /* get name for 'b' */
break;
}
case OP_GETTABUP:
case OP_GETTABLE: {
int k = GETARG_C(i); /* key index */
int t = GETARG_B(i); /* table index */
const char *vn = (op == OP_GETTABLE) /* name of indexed variable */
? luaF_getlocalname(p, t + 1, pc)
: upvalname(p, t);
kname(p, pc, k, name);
return (vn && strcmp(vn, LUA_ENV) == 0) ? "global" : "field";
}
case OP_GETUPVAL: {
*name = upvalname(p, GETARG_B(i));
return "upvalue";
}
case OP_LOADK:
case OP_LOADKX: {
int b = (op == OP_LOADK) ? GETARG_Bx(i)
: GETARG_Ax(p->code[pc + 1]);
if (ttisstring(&p->k[b])) {
*name = svalue(&p->k[b]);
return "constant";
}
break;
}
case OP_SELF: {
int k = GETARG_C(i); /* key index */
kname(p, pc, k, name);
return "method";
}
default: break; /* go through to return NULL */
}
}
return NULL; /* could not find reasonable name */
}
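/*
** Worked example (sketch): for the chunk "local t; t.x = 1" the failing
** slot holds the local 't', so luaF_getlocalname() answers directly and
** the kind is "local".  Reading a name through _ENV instead compiles to
** OP_GETTABUP, which the switch above reports as "global".
*/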
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) {
TMS tm;
Proto *p = ci_func(ci)->p; /* calling function */
int pc = currentpc(ci); /* calling instruction index */
Instruction i = p->code[pc]; /* calling instruction */
switch (GET_OPCODE(i)) {
case OP_CALL:
case OP_TAILCALL: /* get function name */
return getobjname(p, pc, GETARG_A(i), name);
case OP_TFORCALL: { /* for iterator */
*name = "for iterator";
return "for iterator";
}
/* all other instructions can call only through metamethods */
case OP_SELF:
case OP_GETTABUP:
case OP_GETTABLE: tm = TM_INDEX; break;
case OP_SETTABUP:
case OP_SETTABLE: tm = TM_NEWINDEX; break;
case OP_EQ: tm = TM_EQ; break;
case OP_ADD: tm = TM_ADD; break;
case OP_SUB: tm = TM_SUB; break;
case OP_MUL: tm = TM_MUL; break;
case OP_DIV: tm = TM_DIV; break;
case OP_MOD: tm = TM_MOD; break;
case OP_POW: tm = TM_POW; break;
case OP_UNM: tm = TM_UNM; break;
case OP_LEN: tm = TM_LEN; break;
case OP_LT: tm = TM_LT; break;
case OP_LE: tm = TM_LE; break;
case OP_CONCAT: tm = TM_CONCAT; break;
default:
return NULL; /* else no useful name can be found */
}
*name = getstr(G(L)->tmname[tm]);
return "metamethod";
}
/* }====================================================== */
/*
** the only ANSI way to check whether a pointer points to an array
** (used only for error messages, so efficiency is not a big concern)
*/
static int isinstack (CallInfo *ci, const TValue *o) {
StkId p;
for (p = ci->u.l.base; p < ci->top; p++)
if (o == p) return 1;
return 0;
}
static const char *getupvalname (CallInfo *ci, const TValue *o,
const char **name) {
LClosure *c = ci_func(ci);
int i;
for (i = 0; i < c->nupvalues; i++) {
if (c->upvals[i]->v == o) {
*name = upvalname(c->p, i);
return "upvalue";
}
}
return NULL;
}
l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) {
CallInfo *ci = L->ci;
const char *name = NULL;
const char *t = objtypename(o);
const char *kind = NULL;
if (isLua(ci)) {
kind = getupvalname(ci, o, &name); /* check whether 'o' is an upvalue */
if (!kind && isinstack(ci, o)) /* no? try a register */
kind = getobjname(ci_func(ci)->p, currentpc(ci),
cast_int(o - ci->u.l.base), &name);
}
if (kind)
luaG_runerror(L, "attempt to %s %s " LUA_QS " (a %s value)",
op, kind, name, t);
else
luaG_runerror(L, "attempt to %s a %s value", op, t);
}
l_noret luaG_concaterror (lua_State *L, StkId p1, StkId p2) {
if (ttisstring(p1) || ttisnumber(p1)) p1 = p2;
lua_assert(!ttisstring(p1) && !ttisnumber(p1));
luaG_typeerror(L, p1, "concatenate");
}
l_noret luaG_aritherror (lua_State *L, const TValue *p1, const TValue *p2) {
TValue temp;
if (luaV_tonumber(p1, &temp) == NULL)
p2 = p1; /* first operand is wrong */
luaG_typeerror(L, p2, "perform arithmetic on");
}
l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) {
const char *t1 = objtypename(p1);
const char *t2 = objtypename(p2);
if (t1 == t2)
luaG_runerror(L, "attempt to compare two %s values", t1);
else
luaG_runerror(L, "attempt to compare %s with %s", t1, t2);
}
static void addinfo (lua_State *L, const char *msg) {
CallInfo *ci = L->ci;
if (isLua(ci)) { /* is Lua code? */
char buff[LUA_IDSIZE]; /* add file:line information */
int line = currentline(ci);
TString *src = ci_func(ci)->p->source;
if (src)
luaO_chunkid(buff, getstr(src), LUA_IDSIZE);
else { /* no source available; use "?" instead */
buff[0] = '?'; buff[1] = '\0';
}
luaO_pushfstring(L, "%s:%d: %s", buff, line, msg);
}
}
l_noret luaG_errormsg (lua_State *L) {
if (L->errfunc != 0) { /* is there an error handling function? */
StkId errfunc = restorestack(L, L->errfunc);
if (!ttisfunction(errfunc)) luaD_throw(L, LUA_ERRERR);
setobjs2s(L, L->top, L->top - 1); /* move argument */
setobjs2s(L, L->top - 1, errfunc); /* push function */
L->top++;
luaD_call(L, L->top - 2, 1, 0); /* call it */
}
luaD_throw(L, LUA_ERRRUN);
}
l_noret luaG_runerror (lua_State *L, const char *fmt, ...) {
L->runerror++;
va_list argp;
va_start(argp, fmt);
addinfo(L, luaO_pushvfstring(L, fmt, argp));
va_end(argp);
luaG_errormsg(L);
L->runerror--;
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kstat.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kstat.c
index 9f5f92e194ec..43cd4da02e30 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kstat.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kstat.c
@@ -1,579 +1,573 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Links to Illumos.org for more information on kstat function:
* [1] https://illumos.org/man/1M/kstat
* [2] https://illumos.org/man/9f/kstat_create
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/kstat.h>
#include <sys/sbuf.h>
#include <sys/zone.h>
static MALLOC_DEFINE(M_KSTAT, "kstat_data", "Kernel statistics");
SYSCTL_ROOT_NODE(OID_AUTO, kstat, CTLFLAG_RW, 0, "Kernel statistics");
void
__kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{
ksp->ks_raw_ops.headers = headers;
ksp->ks_raw_ops.data = data;
ksp->ks_raw_ops.addr = addr;
}
void
__kstat_set_seq_raw_ops(kstat_t *ksp,
int (*headers)(struct seq_file *f),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{
ksp->ks_raw_ops.seq_headers = headers;
ksp->ks_raw_ops.data = data;
ksp->ks_raw_ops.addr = addr;
}
static int
kstat_default_update(kstat_t *ksp, int rw)
{
ASSERT3P(ksp, !=, NULL);
if (rw == KSTAT_WRITE)
return (EACCES);
return (0);
}
static int
kstat_resize_raw(kstat_t *ksp)
{
if (ksp->ks_raw_bufsize == KSTAT_RAW_MAX)
return (ENOMEM);
free(ksp->ks_raw_buf, M_TEMP);
ksp->ks_raw_bufsize = MIN(ksp->ks_raw_bufsize * 2, KSTAT_RAW_MAX);
ksp->ks_raw_buf = malloc(ksp->ks_raw_bufsize, M_TEMP, M_WAITOK);
return (0);
}
static void *
kstat_raw_default_addr(kstat_t *ksp, loff_t n)
{
if (n == 0)
return (ksp->ks_data);
return (NULL);
}
static int
kstat_sysctl(SYSCTL_HANDLER_ARGS)
{
kstat_t *ksp = arg1;
kstat_named_t *ksent;
uint64_t val;
ksent = ksp->ks_data;
/* Select the correct element */
ksent += arg2;
/* Update the aggsums before reading */
(void) ksp->ks_update(ksp, KSTAT_READ);
val = ksent->value.ui64;
return (sysctl_handle_64(oidp, &val, 0, req));
}
static int
kstat_sysctl_string(SYSCTL_HANDLER_ARGS)
{
kstat_t *ksp = arg1;
kstat_named_t *ksent = ksp->ks_data;
char *val;
uint32_t len = 0;
/* Select the correct element */
ksent += arg2;
/* Update the aggsums before reading */
(void) ksp->ks_update(ksp, KSTAT_READ);
val = KSTAT_NAMED_STR_PTR(ksent);
len = KSTAT_NAMED_STR_BUFLEN(ksent);
val[len-1] = '\0';
return (sysctl_handle_string(oidp, val, len, req));
}
static int
kstat_sysctl_dataset(SYSCTL_HANDLER_ARGS)
{
kstat_t *ksp = arg1;
kstat_named_t *ksent;
kstat_named_t *ksent_ds;
uint64_t val;
char *ds_name;
uint32_t ds_len = 0;
ksent_ds = ksent = ksp->ks_data;
ds_name = KSTAT_NAMED_STR_PTR(ksent_ds);
ds_len = KSTAT_NAMED_STR_BUFLEN(ksent_ds);
ds_name[ds_len-1] = '\0';
if (!zone_dataset_visible(ds_name, NULL)) {
return (EPERM);
}
/* Select the correct element */
ksent += arg2;
/* Update the aggsums before reading */
(void) ksp->ks_update(ksp, KSTAT_READ);
val = ksent->value.ui64;
return (sysctl_handle_64(oidp, &val, 0, req));
}
static int
kstat_sysctl_dataset_string(SYSCTL_HANDLER_ARGS)
{
kstat_t *ksp = arg1;
kstat_named_t *ksent = ksp->ks_data;
char *val;
uint32_t len = 0;
/* Select the correct element */
ksent += arg2;
val = KSTAT_NAMED_STR_PTR(ksent);
len = KSTAT_NAMED_STR_BUFLEN(ksent);
val[len-1] = '\0';
if (!zone_dataset_visible(val, NULL)) {
return (EPERM);
}
return (sysctl_handle_string(oidp, val, len, req));
}
static int
kstat_sysctl_io(SYSCTL_HANDLER_ARGS)
{
- struct sbuf *sb;
+ struct sbuf sb;
kstat_t *ksp = arg1;
kstat_io_t *kip = ksp->ks_data;
int rc;
- sb = sbuf_new_auto();
- if (sb == NULL)
- return (ENOMEM);
+ sbuf_new_for_sysctl(&sb, NULL, 0, req);
+
/* Update the aggsums before reading */
(void) ksp->ks_update(ksp, KSTAT_READ);
/* though wlentime & friends are signed, they will never be negative */
- sbuf_printf(sb,
+ sbuf_printf(&sb,
"%-8llu %-8llu %-8u %-8u %-8llu %-8llu "
"%-8llu %-8llu %-8llu %-8llu %-8u %-8u\n",
kip->nread, kip->nwritten,
kip->reads, kip->writes,
kip->wtime, kip->wlentime, kip->wlastupdate,
kip->rtime, kip->rlentime, kip->rlastupdate,
kip->wcnt, kip->rcnt);
- rc = sbuf_finish(sb);
- if (rc == 0)
- rc = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
- sbuf_delete(sb);
+ rc = sbuf_finish(&sb);
+ sbuf_delete(&sb);
return (rc);
}
static int
kstat_sysctl_raw(SYSCTL_HANDLER_ARGS)
{
- struct sbuf *sb;
+ struct sbuf sb;
void *data;
kstat_t *ksp = arg1;
void *(*addr_op)(kstat_t *ksp, loff_t index);
int n, has_header, rc = 0;
- sb = sbuf_new_auto();
- if (sb == NULL)
- return (ENOMEM);
+ sbuf_new_for_sysctl(&sb, NULL, PAGE_SIZE, req);
if (ksp->ks_raw_ops.addr)
addr_op = ksp->ks_raw_ops.addr;
else
addr_op = kstat_raw_default_addr;
mutex_enter(ksp->ks_lock);
/* Update the aggsums before reading */
(void) ksp->ks_update(ksp, KSTAT_READ);
ksp->ks_raw_bufsize = PAGE_SIZE;
ksp->ks_raw_buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
n = 0;
has_header = (ksp->ks_raw_ops.headers ||
ksp->ks_raw_ops.seq_headers);
restart_headers:
if (ksp->ks_raw_ops.headers) {
rc = ksp->ks_raw_ops.headers(
ksp->ks_raw_buf, ksp->ks_raw_bufsize);
} else if (ksp->ks_raw_ops.seq_headers) {
struct seq_file f;
f.sf_buf = ksp->ks_raw_buf;
f.sf_size = ksp->ks_raw_bufsize;
rc = ksp->ks_raw_ops.seq_headers(&f);
}
if (has_header) {
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart_headers;
- if (rc == 0)
- sbuf_printf(sb, "\n%s", ksp->ks_raw_buf);
+ if (rc == 0) {
+ sbuf_cat(&sb, "\n");
+ sbuf_cat(&sb, ksp->ks_raw_buf);
+ }
}
while ((data = addr_op(ksp, n)) != NULL) {
restart:
if (ksp->ks_raw_ops.data) {
rc = ksp->ks_raw_ops.data(ksp->ks_raw_buf,
ksp->ks_raw_bufsize, data);
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart;
if (rc == 0)
- sbuf_printf(sb, "%s", ksp->ks_raw_buf);
+ sbuf_cat(&sb, ksp->ks_raw_buf);
} else {
ASSERT3U(ksp->ks_ndata, ==, 1);
- sbuf_hexdump(sb, ksp->ks_data,
+ sbuf_hexdump(&sb, ksp->ks_data,
ksp->ks_data_size, NULL, 0);
}
n++;
}
free(ksp->ks_raw_buf, M_TEMP);
mutex_exit(ksp->ks_lock);
- sbuf_trim(sb);
- rc = sbuf_finish(sb);
- if (rc == 0)
- rc = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
- sbuf_delete(sb);
+ rc = sbuf_finish(&sb);
+ sbuf_delete(&sb);
return (rc);
}
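/*
 * Minimal sketch of the sbuf-to-sysctl pattern adopted above: the sbuf
 * lives on the stack and drains straight into the request via SYSCTL_OUT,
 * so no auto-growing intermediate buffer is needed.
 *
 *   struct sbuf sb;
 *   sbuf_new_for_sysctl(&sb, NULL, 0, req);
 *   sbuf_printf(&sb, "%u\n", value);   ('value' is hypothetical)
 *   error = sbuf_finish(&sb);          (flushes and returns drain errors)
 *   sbuf_delete(&sb);
 *   return (error);
 */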
kstat_t *
__kstat_create(const char *module, int instance, const char *name,
const char *class, uchar_t ks_type, uint_t ks_ndata, uchar_t flags)
{
char buf[KSTAT_STRLEN];
struct sysctl_oid *root;
kstat_t *ksp;
char *pool;
KASSERT(instance == 0, ("instance=%d", instance));
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
ASSERT3U(ks_ndata, ==, 1);
if (class == NULL)
class = "misc";
/*
* Allocate the main structure. We don't need to keep a copy of
* module in here, because it is only used for sysctl node creation
* done in this function.
*/
ksp = malloc(sizeof (*ksp), M_KSTAT, M_WAITOK|M_ZERO);
ksp->ks_crtime = gethrtime();
ksp->ks_snaptime = ksp->ks_crtime;
ksp->ks_instance = instance;
(void) strlcpy(ksp->ks_name, name, KSTAT_STRLEN);
(void) strlcpy(ksp->ks_class, class, KSTAT_STRLEN);
ksp->ks_type = ks_type;
ksp->ks_flags = flags;
ksp->ks_update = kstat_default_update;
mutex_init(&ksp->ks_private_lock, NULL, MUTEX_DEFAULT, NULL);
ksp->ks_lock = &ksp->ks_private_lock;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
ksp->ks_ndata = 1;
ksp->ks_data_size = ks_ndata;
break;
case KSTAT_TYPE_NAMED:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_named_t);
break;
case KSTAT_TYPE_INTR:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_intr_t);
break;
case KSTAT_TYPE_IO:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_io_t);
break;
case KSTAT_TYPE_TIMER:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_timer_t);
break;
default:
panic("Undefined kstat type %d\n", ksp->ks_type);
}
if (ksp->ks_flags & KSTAT_FLAG_VIRTUAL)
ksp->ks_data = NULL;
else
ksp->ks_data = kmem_zalloc(ksp->ks_data_size, KM_SLEEP);
/*
* Some kstats use a module name like "zfs/poolname" to distinguish a
* set of kstats belonging to a specific pool. Split on '/' to add an
* extra node for the pool name if needed.
*/
(void) strlcpy(buf, module, KSTAT_STRLEN);
module = buf;
pool = strchr(module, '/');
if (pool != NULL)
*pool++ = '\0';
/*
* Create sysctl tree for those statistics:
*
* kstat.<module>[.<pool>].<class>.<name>
*/
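/*
 * For example (pool name hypothetical), a named kstat created with
 * module "zfs/tank", class "misc" and name "stats" surfaces as the
 * sysctl subtree kstat.zfs.tank.misc.stats.
 */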
sysctl_ctx_init(&ksp->ks_sysctl_ctx);
root = SYSCTL_ADD_NODE(&ksp->ks_sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_kstat), OID_AUTO, module, CTLFLAG_RW, 0,
"");
if (root == NULL) {
printf("%s: Cannot create kstat.%s tree!\n", __func__, module);
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
free(ksp, M_KSTAT);
return (NULL);
}
if (pool != NULL) {
root = SYSCTL_ADD_NODE(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(root), OID_AUTO, pool, CTLFLAG_RW, 0, "");
if (root == NULL) {
printf("%s: Cannot create kstat.%s.%s tree!\n",
__func__, module, pool);
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
free(ksp, M_KSTAT);
return (NULL);
}
}
root = SYSCTL_ADD_NODE(&ksp->ks_sysctl_ctx, SYSCTL_CHILDREN(root),
OID_AUTO, class, CTLFLAG_RW, 0, "");
if (root == NULL) {
if (pool != NULL)
printf("%s: Cannot create kstat.%s.%s.%s tree!\n",
__func__, module, pool, class);
else
printf("%s: Cannot create kstat.%s.%s tree!\n",
__func__, module, class);
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
free(ksp, M_KSTAT);
return (NULL);
}
if (ksp->ks_type == KSTAT_TYPE_NAMED) {
root = SYSCTL_ADD_NODE(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(root),
OID_AUTO, name, CTLFLAG_RW, 0, "");
if (root == NULL) {
if (pool != NULL)
printf("%s: Cannot create kstat.%s.%s.%s.%s "
"tree!\n", __func__, module, pool, class,
name);
else
printf("%s: Cannot create kstat.%s.%s.%s "
"tree!\n", __func__, module, class, name);
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
free(ksp, M_KSTAT);
return (NULL);
}
}
ksp->ks_sysctl_root = root;
return (ksp);
}
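/*
 * Illustrative creation sequence (sketch; the name, count and data
 * array are hypothetical):
 *
 *   kstat_t *ksp = kstat_create("zfs", 0, "stats", "misc",
 *       KSTAT_TYPE_NAMED, nstats, KSTAT_FLAG_VIRTUAL);
 *   if (ksp != NULL) {
 *       ksp->ks_data = my_named_stats;   (array of kstat_named_t)
 *       kstat_install(ksp);
 *   }
 *   ...
 *   kstat_delete(ksp);
 */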
static void
kstat_install_named(kstat_t *ksp)
{
kstat_named_t *ksent;
char *namelast;
int typelast;
ksent = ksp->ks_data;
VERIFY((ksp->ks_flags & KSTAT_FLAG_VIRTUAL) || ksent != NULL);
typelast = 0;
namelast = NULL;
for (int i = 0; i < ksp->ks_ndata; i++, ksent++) {
if (ksent->data_type != 0) {
typelast = ksent->data_type;
namelast = ksent->name;
}
switch (typelast) {
case KSTAT_DATA_CHAR:
/* Not Implemented */
break;
case KSTAT_DATA_INT32:
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_S32 | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "I", namelast);
break;
case KSTAT_DATA_UINT32:
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_U32 | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "IU", namelast);
break;
case KSTAT_DATA_INT64:
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "Q", namelast);
break;
case KSTAT_DATA_UINT64:
if (strcmp(ksp->ks_class, "dataset") == 0) {
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl_dataset, "QU",
namelast);
} else {
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "QU", namelast);
}
break;
case KSTAT_DATA_LONG:
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "L", namelast);
break;
case KSTAT_DATA_ULONG:
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast,
CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl, "LU", namelast);
break;
case KSTAT_DATA_STRING:
if (strcmp(ksp->ks_class, "dataset") == 0) {
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast, CTLTYPE_STRING |
CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl_dataset_string, "A",
namelast);
} else {
SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, namelast, CTLTYPE_STRING |
CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, i, kstat_sysctl_string, "A",
namelast);
}
break;
default:
panic("unsupported type: %d", typelast);
}
}
}
void
kstat_install(kstat_t *ksp)
{
struct sysctl_oid *root;
if (ksp->ks_ndata == UINT32_MAX)
VERIFY3U(ksp->ks_type, ==, KSTAT_TYPE_RAW);
switch (ksp->ks_type) {
case KSTAT_TYPE_NAMED:
return (kstat_install_named(ksp));
case KSTAT_TYPE_RAW:
if (ksp->ks_raw_ops.data) {
root = SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, ksp->ks_name, CTLTYPE_STRING | CTLFLAG_RD
| CTLFLAG_MPSAFE | CTLFLAG_SKIP,
ksp, 0, kstat_sysctl_raw, "A", ksp->ks_name);
} else {
root = SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, ksp->ks_name, CTLTYPE_OPAQUE | CTLFLAG_RD
| CTLFLAG_MPSAFE | CTLFLAG_SKIP,
ksp, 0, kstat_sysctl_raw, "", ksp->ks_name);
}
break;
case KSTAT_TYPE_IO:
root = SYSCTL_ADD_PROC(&ksp->ks_sysctl_ctx,
SYSCTL_CHILDREN(ksp->ks_sysctl_root),
OID_AUTO, ksp->ks_name,
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
ksp, 0, kstat_sysctl_io, "A", ksp->ks_name);
break;
case KSTAT_TYPE_TIMER:
case KSTAT_TYPE_INTR:
default:
panic("unsupported kstat type %d\n", ksp->ks_type);
}
VERIFY3P(root, !=, NULL);
ksp->ks_sysctl_root = root;
}
void
kstat_delete(kstat_t *ksp)
{
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
ksp->ks_lock = NULL;
mutex_destroy(&ksp->ks_private_lock);
if (!(ksp->ks_flags & KSTAT_FLAG_VIRTUAL))
kmem_free(ksp->ks_data, ksp->ks_data_size);
free(ksp, M_KSTAT);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
index a5f486b95db4..c33ce01ab39b 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
@@ -1,333 +1,333 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/zfs_rlock.h>
#include <sys/racct.h>
#include <sys/vm.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/ccompat.h>
#ifndef IDX_TO_OFF
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#endif
#if __FreeBSD_version < 1300051
#define VM_ALLOC_BUSY_FLAGS VM_ALLOC_NOBUSY
#else
#define VM_ALLOC_BUSY_FLAGS VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY
#endif
#if __FreeBSD_version < 1300072
#define dmu_page_lock(m) vm_page_lock(m)
#define dmu_page_unlock(m) vm_page_unlock(m)
#else
#define dmu_page_lock(m)
#define dmu_page_unlock(m)
#endif
int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
vm_page_t *ma, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
struct sf_buf *sf;
int numbufs, i;
int err;
if (size == 0)
return (0);
err = dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
int tocpy, copied, thiscpy;
int bufoff;
dmu_buf_t *db = dbp[i];
caddr_t va;
ASSERT3U(size, >, 0);
ASSERT3U(db->db_size, >=, PAGESIZE);
bufoff = offset - db->db_offset;
tocpy = (int)MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_FALSE);
else
dmu_buf_will_dirty(db, tx);
for (copied = 0; copied < tocpy; copied += PAGESIZE) {
ASSERT3U(ptoa((*ma)->pindex), ==,
db->db_offset + bufoff);
thiscpy = MIN(PAGESIZE, tocpy - copied);
va = zfs_map_page(*ma, &sf);
memcpy((char *)db->db_data + bufoff, va, thiscpy);
zfs_unmap_page(sf);
ma += 1;
bufoff += PAGESIZE;
}
if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ dmu_buf_fill_done(db, tx, B_FALSE);
offset += tocpy;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
int
dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
int *rbehind, int *rahead, int last_size)
{
struct sf_buf *sf;
vm_object_t vmobj;
vm_page_t m;
dmu_buf_t **dbp;
dmu_buf_t *db;
caddr_t va;
int numbufs, i;
int bufoff, pgoff, tocpy;
int mi, di;
int err;
ASSERT3U(ma[0]->pindex + count - 1, ==, ma[count - 1]->pindex);
ASSERT3S(last_size, <=, PAGE_SIZE);
err = dmu_buf_hold_array(os, object, IDX_TO_OFF(ma[0]->pindex),
IDX_TO_OFF(count - 1) + last_size, TRUE, FTAG, &numbufs, &dbp);
if (err != 0)
return (err);
#ifdef ZFS_DEBUG
IMPLY(last_size < PAGE_SIZE, *rahead == 0);
if (dbp[0]->db_offset != 0 || numbufs > 1) {
for (i = 0; i < numbufs; i++) {
ASSERT(ISP2(dbp[i]->db_size));
ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0);
ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
}
}
#endif
vmobj = ma[0]->object;
zfs_vmobject_wlock_12(vmobj);
db = dbp[0];
for (i = 0; i < *rbehind; i++) {
m = vm_page_grab_unlocked(vmobj, ma[0]->pindex - 1 - i,
VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
if (m == NULL)
break;
if (!vm_page_none_valid(m)) {
ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(m);
break;
}
ASSERT3U(m->dirty, ==, 0);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
va = zfs_map_page(m, &sf);
memcpy(va, (char *)db->db_data + bufoff, PAGESIZE);
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
dmu_page_unlock(m);
vm_page_do_sunbusy(m);
}
*rbehind = i;
bufoff = IDX_TO_OFF(ma[0]->pindex) % db->db_size;
pgoff = 0;
for (mi = 0, di = 0; mi < count && di < numbufs; ) {
if (pgoff == 0) {
m = ma[mi];
if (m != bogus_page) {
vm_page_assert_xbusied(m);
ASSERT(vm_page_none_valid(m));
ASSERT3U(m->dirty, ==, 0);
ASSERT(!pmap_page_is_write_mapped(m));
va = zfs_map_page(m, &sf);
}
}
if (bufoff == 0)
db = dbp[di];
if (m != bogus_page) {
ASSERT3U(IDX_TO_OFF(m->pindex) + pgoff, ==,
db->db_offset + bufoff);
}
/*
* We do not need to clamp the copy size by the file
* size as the last block is zero-filled beyond the
* end of file anyway.
*/
tocpy = MIN(db->db_size - bufoff, PAGESIZE - pgoff);
ASSERT3S(tocpy, >=, 0);
if (m != bogus_page)
memcpy(va + pgoff, (char *)db->db_data + bufoff, tocpy);
pgoff += tocpy;
ASSERT3S(pgoff, >=, 0);
ASSERT3S(pgoff, <=, PAGESIZE);
if (pgoff == PAGESIZE) {
if (m != bogus_page) {
zfs_unmap_page(sf);
vm_page_valid(m);
}
ASSERT3S(mi, <, count);
mi++;
pgoff = 0;
}
bufoff += tocpy;
ASSERT3S(bufoff, >=, 0);
ASSERT3S(bufoff, <=, db->db_size);
if (bufoff == db->db_size) {
ASSERT3S(di, <, numbufs);
di++;
bufoff = 0;
}
}
#ifdef ZFS_DEBUG
/*
* Three possibilities:
* - last requested page ends at a buffer boundary and, thus,
* all pages and buffers have been iterated;
* - all requested pages are filled, but the last buffer
* has not been exhausted;
* the read-ahead is possible only in this case;
* - all buffers have been read, but the last page has not been
* fully filled;
* this is only possible if the file has only a single buffer
* with a size that is not a multiple of the page size.
*/
if (mi == count) {
ASSERT3S(di, >=, numbufs - 1);
IMPLY(*rahead != 0, di == numbufs - 1);
IMPLY(*rahead != 0, bufoff != 0);
ASSERT0(pgoff);
}
if (di == numbufs) {
ASSERT3S(mi, >=, count - 1);
ASSERT0(*rahead);
IMPLY(pgoff == 0, mi == count);
if (pgoff != 0) {
ASSERT3S(mi, ==, count - 1);
ASSERT3U((dbp[0]->db_size & PAGE_MASK), !=, 0);
}
}
#endif
if (pgoff != 0) {
ASSERT3P(m, !=, bogus_page);
memset(va + pgoff, 0, PAGESIZE - pgoff);
zfs_unmap_page(sf);
vm_page_valid(m);
}
for (i = 0; i < *rahead; i++) {
m = vm_page_grab_unlocked(vmobj, ma[count - 1]->pindex + 1 + i,
VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
if (m == NULL)
break;
if (!vm_page_none_valid(m)) {
ASSERT3U(m->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(m);
break;
}
ASSERT3U(m->dirty, ==, 0);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
tocpy = MIN(db->db_size - bufoff, PAGESIZE);
va = zfs_map_page(m, &sf);
memcpy(va, (char *)db->db_data + bufoff, tocpy);
if (tocpy < PAGESIZE) {
ASSERT3S(i, ==, *rahead - 1);
ASSERT3U((db->db_size & PAGE_MASK), !=, 0);
memset(va + tocpy, 0, PAGESIZE - tocpy);
}
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
dmu_page_unlock(m);
vm_page_do_sunbusy(m);
}
*rahead = i;
zfs_vmobject_wunlock_12(vmobj);
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (0);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index 60c9ff0581e0..f7f2be2cf95a 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -1,307 +1,346 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_file.h>
#include <sys/buf.h>
#include <sys/stat.h>
int
zfs_file_open(const char *path, int flags, int mode, zfs_file_t **fpp)
{
struct thread *td;
- int rc, fd;
+ struct vnode *vp;
+ struct file *fp;
+ struct nameidata nd;
+ int error;
td = curthread;
pwd_ensure_dirs();
- /* 12.x doesn't take a const char * */
- rc = kern_openat(td, AT_FDCWD, __DECONST(char *, path),
- UIO_SYSSPACE, flags, mode);
- if (rc)
- return (SET_ERROR(rc));
- fd = td->td_retval[0];
- td->td_retval[0] = 0;
- if (fget(curthread, fd, &cap_no_rights, fpp))
- kern_close(td, fd);
+
+ KASSERT((flags & (O_EXEC | O_PATH)) == 0,
+ ("invalid flags: 0x%x", flags));
+ KASSERT((flags & O_ACCMODE) != O_ACCMODE,
+ ("invalid flags: 0x%x", flags));
+ flags = FFLAGS(flags);
+
+ error = falloc_noinstall(td, &fp);
+ if (error != 0) {
+ return (error);
+ }
+ fp->f_flag = flags & FMASK;
+
+#if __FreeBSD_version >= 1400043
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path);
+#else
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
+#endif
+ error = vn_open(&nd, &flags, mode, fp);
+ if (error != 0) {
+ falloc_abort(td, fp);
+ return (SET_ERROR(error));
+ }
+ NDFREE_PNBUF(&nd);
+ vp = nd.ni_vp;
+ fp->f_vnode = vp;
+ if (fp->f_ops == &badfileops) {
+ finit_vnode(fp, flags, NULL, &vnops);
+ }
+ VOP_UNLOCK(vp);
+ if (vp->v_type != VREG) {
+ zfs_file_close(fp);
+ return (SET_ERROR(EACCES));
+ }
+
+ if (flags & O_TRUNC) {
+ error = fo_truncate(fp, 0, td->td_ucred, td);
+ if (error != 0) {
+ zfs_file_close(fp);
+ return (SET_ERROR(error));
+ }
+ }
+
+ *fpp = fp;
+
return (0);
}
void
zfs_file_close(zfs_file_t *fp)
{
- fo_close(fp, curthread);
+ fdrop(fp, curthread);
}
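/*
 * Illustrative caller pattern (sketch; the path and buffer are
 * hypothetical and error handling is elided):
 *
 *   zfs_file_t *fp;
 *   ssize_t resid;
 *   if (zfs_file_open("/boot/zfs/zpool.cache", O_RDONLY, 0, &fp) == 0) {
 *       (void) zfs_file_read(fp, buf, sizeof (buf), &resid);
 *       zfs_file_close(fp);
 *   }
 */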
static int
zfs_file_write_impl(zfs_file_t *fp, const void *buf, size_t count, loff_t *offp,
ssize_t *resid)
{
ssize_t rc;
struct uio auio;
struct thread *td;
struct iovec aiov;
td = curthread;
aiov.iov_base = (void *)(uintptr_t)buf;
aiov.iov_len = count;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = count;
auio.uio_rw = UIO_WRITE;
auio.uio_td = td;
auio.uio_offset = *offp;
if ((fp->f_flag & FWRITE) == 0)
return (SET_ERROR(EBADF));
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
rc = fo_write(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
if (resid)
*resid = auio.uio_resid;
else if (auio.uio_resid)
return (SET_ERROR(EIO));
*offp += count - auio.uio_resid;
return (rc);
}
int
zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
{
loff_t off = fp->f_offset;
ssize_t rc;
rc = zfs_file_write_impl(fp, buf, count, &off, resid);
if (rc == 0)
fp->f_offset = off;
return (SET_ERROR(rc));
}
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
ssize_t *resid)
{
return (zfs_file_write_impl(fp, buf, count, &off, resid));
}
static int
zfs_file_read_impl(zfs_file_t *fp, void *buf, size_t count, loff_t *offp,
ssize_t *resid)
{
ssize_t rc;
struct uio auio;
struct thread *td;
struct iovec aiov;
td = curthread;
aiov.iov_base = (void *)(uintptr_t)buf;
aiov.iov_len = count;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = count;
auio.uio_rw = UIO_READ;
auio.uio_td = td;
auio.uio_offset = *offp;
if ((fp->f_flag & FREAD) == 0)
return (SET_ERROR(EBADF));
rc = fo_read(fp, &auio, td->td_ucred, FOF_OFFSET, td);
if (rc)
return (SET_ERROR(rc));
if (resid)
*resid = auio.uio_resid;
*offp += count - auio.uio_resid;
return (SET_ERROR(0));
}
int
zfs_file_read(zfs_file_t *fp, void *buf, size_t count, ssize_t *resid)
{
loff_t off = fp->f_offset;
ssize_t rc;
rc = zfs_file_read_impl(fp, buf, count, &off, resid);
if (rc == 0)
fp->f_offset = off;
return (rc);
}
int
zfs_file_pread(zfs_file_t *fp, void *buf, size_t count, loff_t off,
ssize_t *resid)
{
return (zfs_file_read_impl(fp, buf, count, &off, resid));
}
int
zfs_file_seek(zfs_file_t *fp, loff_t *offp, int whence)
{
int rc;
struct thread *td;
td = curthread;
if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0)
return (SET_ERROR(ESPIPE));
rc = fo_seek(fp, *offp, whence, td);
if (rc == 0)
*offp = td->td_uretoff.tdu_off;
return (SET_ERROR(rc));
}
int
zfs_file_getattr(zfs_file_t *fp, zfs_file_attr_t *zfattr)
{
struct thread *td;
struct stat sb;
int rc;
td = curthread;
#if __FreeBSD_version < 1400037
rc = fo_stat(fp, &sb, td->td_ucred, td);
#else
rc = fo_stat(fp, &sb, td->td_ucred);
#endif
if (rc)
return (SET_ERROR(rc));
zfattr->zfa_size = sb.st_size;
zfattr->zfa_mode = sb.st_mode;
return (0);
}
static __inline int
zfs_vop_fsync(vnode_t *vp)
{
struct mount *mp;
int error;
#if __FreeBSD_version < 1400068
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
#else
if ((error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH)) != 0)
#endif
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK1(vp);
vn_finished_write(mp);
drop:
return (SET_ERROR(error));
}
int
zfs_file_fsync(zfs_file_t *fp, int flags)
{
if (fp->f_type != DTYPE_VNODE)
return (EINVAL);
return (zfs_vop_fsync(fp->f_vnode));
}
zfs_file_t *
zfs_file_get(int fd)
{
struct file *fp;
if (fget(curthread, fd, &cap_no_rights, &fp))
return (NULL);
return (fp);
}
void
zfs_file_put(zfs_file_t *fp)
{
- fdrop(fp, curthread);
+ zfs_file_close(fp);
}
loff_t
zfs_file_off(zfs_file_t *fp)
{
return (fp->f_offset);
}
void *
zfs_file_private(zfs_file_t *fp)
{
file_t *tmpfp;
void *data;
int error;
tmpfp = curthread->td_fpop;
curthread->td_fpop = fp;
error = devfs_get_cdevpriv(&data);
curthread->td_fpop = tmpfp;
if (error != 0)
return (NULL);
return (data);
}
int
zfs_file_unlink(const char *fnamep)
{
zfs_uio_seg_t seg = UIO_SYSSPACE;
int rc;
#if __FreeBSD_version >= 1300018
rc = kern_funlinkat(curthread, AT_FDCWD, fnamep, FD_NONE, seg, 0, 0);
#elif __FreeBSD_version >= 1202504 || defined(AT_BENEATH)
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0, 0);
#else
rc = kern_unlinkat(curthread, AT_FDCWD, __DECONST(char *, fnamep),
seg, 0);
#endif
return (SET_ERROR(rc));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 23b8da184535..a972c720dfdb 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -1,2551 +1,2547 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/acl.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/jail.h>
#include <sys/osd.h>
#include <ufs/ufs/quota.h>
#include <sys/zfs_quota.h>
#include "zfs_comutil.h"
#ifndef MNTK_VMSETSIZE_BUG
#define MNTK_VMSETSIZE_BUG 0
#endif
#ifndef MNTK_NOMSYNC
#define MNTK_NOMSYNC 8
#endif
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");
int zfs_super_owner;
SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owners can perform privileged operation on file systems");
int zfs_debug_level;
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0,
"Debug level");
-int zfs_bclone_enabled = 0;
-SYSCTL_INT(_vfs_zfs, OID_AUTO, bclone_enabled, CTLFLAG_RWTUN,
- &zfs_bclone_enabled, 0, "Enable block cloning");
-
struct zfs_jailparam {
int mount_snapshot;
};
static struct zfs_jailparam zfs_jailparam0 = {
.mount_snapshot = 0,
};
static int zfs_jailparam_slot;
SYSCTL_JAIL_PARAM_SYS_NODE(zfs, CTLFLAG_RW, "Jail ZFS parameters");
SYSCTL_JAIL_PARAM(_zfs, mount_snapshot, CTLTYPE_INT | CTLFLAG_RW, "I",
"Allow mounting snapshots in the .zfs directory for unjailed datasets");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");
static int zfs_version_acl = ZFS_ACL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, acl, CTLFLAG_RD, &zfs_version_acl, 0,
"ZFS_ACL_VERSION");
static int zfs_version_spa = SPA_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, spa, CTLFLAG_RD, &zfs_version_spa, 0,
"SPA_VERSION");
static int zfs_version_zpl = ZPL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
"ZPL_VERSION");
#if __FreeBSD_version >= 1400018
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg,
bool *mp_busy);
#else
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg);
#endif
static int zfs_mount(vfs_t *vfsp);
static int zfs_umount(vfs_t *vfsp, int fflag);
static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
static int zfs_sync(vfs_t *vfsp, int waitfor);
#if __FreeBSD_version >= 1300098
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors);
#else
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors);
#endif
static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp);
static void zfs_freevfs(vfs_t *vfsp);
struct vfsops zfs_vfsops = {
.vfs_mount = zfs_mount,
.vfs_unmount = zfs_umount,
#if __FreeBSD_version >= 1300049
.vfs_root = vfs_cache_root,
.vfs_cachedroot = zfs_root,
#else
.vfs_root = zfs_root,
#endif
.vfs_statfs = zfs_statfs,
.vfs_vget = zfs_vget,
.vfs_sync = zfs_sync,
.vfs_checkexp = zfs_checkexp,
.vfs_fhtovp = zfs_fhtovp,
.vfs_quotactl = zfs_quotactl,
};
#ifdef VFCF_CROSS_COPY_FILE_RANGE
VFS_SET(zfs_vfsops, zfs,
VFCF_DELEGADMIN | VFCF_JAIL | VFCF_CROSS_COPY_FILE_RANGE);
#else
VFS_SET(zfs_vfsops, zfs, VFCF_DELEGADMIN | VFCF_JAIL);
#endif
/*
* We need to keep a count of active fs's.
* This is necessary to prevent our module
* from being unloaded after a umount -f.
*/
static uint32_t zfs_active_fs_count = 0;
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, &zfvp);
if (error != 0)
return (error);
if (zfvp == NULL)
return (ENOENT);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL))
tmp = 1;
break;
case ZFS_PROP_DEVICES:
if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL))
tmp = 1;
break;
case ZFS_PROP_EXEC:
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL))
tmp = 1;
break;
case ZFS_PROP_SETUID:
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL))
tmp = 1;
break;
case ZFS_PROP_READONLY:
if (vfs_optionisset(vfsp, MNTOPT_RW, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
tmp = 1;
break;
case ZFS_PROP_XATTR:
if (zfvp->z_flags & ZSB_XATTR)
tmp = zfvp->z_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
tmp = 1;
break;
default:
vfs_unbusy(vfsp);
return (ENOENT);
}
vfs_unbusy(vfsp);
if (tmp != *val) {
if (setpoint)
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
static int
zfs_getquota(zfsvfs_t *zfsvfs, uid_t id, int isgroup, struct dqblk64 *dqp)
{
int error = 0;
char buf[32];
uint64_t usedobj, quotaobj;
uint64_t quota, used = 0;
timespec_t now;
usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
if (quotaobj == 0 || zfsvfs->z_replay) {
error = ENOENT;
goto done;
}
(void) sprintf(buf, "%llx", (longlong_t)id);
if ((error = zap_lookup(zfsvfs->z_os, quotaobj,
buf, sizeof (quota), 1, &quota)) != 0) {
dprintf("%s(%d): quotaobj lookup failed\n",
__FUNCTION__, __LINE__);
goto done;
}
/*
* quota(8) uses bsoftlimit as "quota", and hardlimit as "limit".
* So we set them to be the same.
*/
dqp->dqb_bsoftlimit = dqp->dqb_bhardlimit = btodb(quota);
error = zap_lookup(zfsvfs->z_os, usedobj, buf, sizeof (used), 1, &used);
if (error && error != ENOENT) {
dprintf("%s(%d): usedobj failed; %d\n",
__FUNCTION__, __LINE__, error);
goto done;
}
dqp->dqb_curblocks = btodb(used);
dqp->dqb_ihardlimit = dqp->dqb_isoftlimit = 0;
vfs_timestamp(&now);
/*
* Setting this to 0 causes FreeBSD quota(8) to print
* the number of days since the epoch, which isn't
* particularly useful.
*/
dqp->dqb_btime = dqp->dqb_itime = now.tv_sec;
done:
return (error);
}
static int
#if __FreeBSD_version >= 1400018
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg, bool *mp_busy)
#else
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct thread *td;
int cmd, type, error = 0;
int bitsize;
zfs_userquota_prop_t quota_type;
struct dqblk64 dqblk = { 0 };
td = curthread;
cmd = cmds >> SUBCMDSHIFT;
type = cmds & SUBCMDMASK;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (id == -1) {
switch (type) {
case USRQUOTA:
id = td->td_ucred->cr_ruid;
break;
case GRPQUOTA:
id = td->td_ucred->cr_rgid;
break;
default:
error = EINVAL;
#if __FreeBSD_version < 1400018
if (cmd == Q_QUOTAON || cmd == Q_QUOTAOFF)
vfs_unbusy(vfsp);
#endif
goto done;
}
}
/*
* Map BSD type to:
* ZFS_PROP_USERUSED,
* ZFS_PROP_USERQUOTA,
* ZFS_PROP_GROUPUSED,
* ZFS_PROP_GROUPQUOTA
*/
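/*
 * For example, a userland quotactl(2) request built as
 * QCMD(Q_GETQUOTA, USRQUOTA) arrives here with cmd == Q_GETQUOTA and
 * type == USRQUOTA, and is answered from ZFS_PROP_USERUSED via
 * zfs_getquota() below.
 */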
switch (cmd) {
case Q_SETQUOTA:
case Q_SETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERQUOTA;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPQUOTA;
else
error = EINVAL;
break;
case Q_GETQUOTA:
case Q_GETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERUSED;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPUSED;
else
error = EINVAL;
break;
}
/*
* Depending on the cmd, we may need to get
* the ruid and domain (see fuidstr_to_sid?),
* the fuid (how?), or other information.
* Create fuid using zfs_fuid_create(zfsvfs, id,
* ZFS_OWNER or ZFS_GROUP, cr, &fuidp)?
* I think I can use just the id?
*
* Look at zfs_id_overquota() to look up a quota.
* zap_lookup(something, quotaobj, fuidstring,
* sizeof (long long), 1, &quota)
*
* See zfs_set_userquota() to set a quota.
*/
if ((uint32_t)type >= MAXQUOTAS) {
error = EINVAL;
goto done;
}
switch (cmd) {
case Q_GETQUOTASIZE:
bitsize = 64;
error = copyout(&bitsize, arg, sizeof (int));
break;
case Q_QUOTAON:
// As far as I can tell, you can't turn quotas on or off on zfs
error = 0;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_QUOTAOFF:
error = ENOTSUP;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_SETQUOTA:
error = copyin(arg, &dqblk, sizeof (dqblk));
if (error == 0)
error = zfs_set_userquota(zfsvfs, quota_type,
"", id, dbtob(dqblk.dqb_bhardlimit));
break;
case Q_GETQUOTA:
error = zfs_getquota(zfsvfs, id, type == GRPQUOTA, &dqblk);
if (error == 0)
error = copyout(&dqblk, arg, sizeof (dqblk));
break;
default:
error = EINVAL;
break;
}
done:
zfs_exit(zfsvfs, FTAG);
return (error);
}
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY));
}
static int
zfs_sync(vfs_t *vfsp, int waitfor)
{
/*
* Data integrity is job one. We don't want a compromised kernel
* writing to the storage pool, so we never sync during panic.
*/
if (panicstr)
return (0);
/*
* Ignore the system syncher. ZFS already commits async data
* at zfs_txg_timeout intervals.
*/
if (waitfor == MNT_LAZY)
return (0);
if (vfsp != NULL) {
/*
* Sync a specific filesystem.
*/
zfsvfs_t *zfsvfs = vfsp->vfs_data;
dsl_pool_t *dp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (rebooting && spa_suspended(dp->dp_spa)) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == TRUE) {
zfsvfs->z_atime = TRUE;
zfsvfs->z_vfs->vfs_flag &= ~MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
} else {
zfsvfs->z_atime = FALSE;
zfsvfs->z_vfs->vfs_flag |= MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
}
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
zfsvfs->z_vfs->mnt_stat.f_iosize = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval) {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
} else {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
}
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
}
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
}
}
/*
* The nbmand mount option can be changed at mount time.
* We can't allow it to be toggled on live file systems, or incorrect
* behavior may be seen from CIFS clients.
*
* This property isn't registered via dsl_prop_register(), but this callback
* will be called when a file system is first mounted.
*/
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
} else {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
}
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_inherit = newval;
}
static void
acl_type_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_type = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
uint64_t nbmand;
boolean_t readonly = B_FALSE;
boolean_t do_readonly = B_FALSE;
boolean_t setuid = B_FALSE;
boolean_t do_setuid = B_FALSE;
boolean_t exec = B_FALSE;
boolean_t do_exec = B_FALSE;
boolean_t xattr = B_FALSE;
boolean_t atime = B_FALSE;
boolean_t do_atime = B_FALSE;
boolean_t do_xattr = B_FALSE;
int error = 0;
ASSERT3P(vfsp, !=, NULL);
zfsvfs = vfsp->vfs_data;
ASSERT3P(zfsvfs, !=, NULL);
os = zfsvfs->z_os;
/*
* This function can be called for a snapshot when we update the snapshot's
* mount point, which isn't really supported.
*/
if (dmu_objset_is_snapshot(os))
return (EOPNOTSUPP);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
!spa_writeable(dmu_objset_spa(os))) {
readonly = B_TRUE;
do_readonly = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
readonly = B_FALSE;
do_readonly = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
setuid = B_FALSE;
do_setuid = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
setuid = B_TRUE;
do_setuid = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
exec = B_FALSE;
do_exec = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
exec = B_TRUE;
do_exec = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_OFF;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_DIRXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SAXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_SA;
do_xattr = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
atime = B_FALSE;
do_atime = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
atime = B_TRUE;
do_atime = B_TRUE;
}
/*
* We need to enter pool configuration here, so that we can use
* dsl_prop_get_int_ds() to handle the special nbmand property below.
* dsl_prop_get_integer() cannot be used, because it has to acquire
* spa_namespace_lock and we cannot do that because we already hold
* z_teardown_lock. The problem is that spa_write_cachefile() is called
* with spa_namespace_lock held and the function calls ZFS vnode
* operations to write the cache file, and thus z_teardown_lock is
* acquired after spa_namespace_lock.
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
/*
* nbmand is a special property: it is documented to be changeable
* only at mount time. Check the mount options first, and fall back
* to the dataset property only if neither option was specified.
*/
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
nbmand = B_FALSE;
} else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
nbmand = B_TRUE;
} else if ((error = dsl_prop_get_int_ds(ds, "nbmand", &nbmand)) != 0) {
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
return (error);
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
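/* Once error is nonzero, the ?: chain skips all remaining registrations. */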
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acl_type_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (do_readonly)
readonly_changed_cb(zfsvfs, readonly);
if (do_setuid)
setuid_changed_cb(zfsvfs, setuid);
if (do_exec)
exec_changed_cb(zfsvfs, exec);
if (do_xattr)
xattr_changed_cb(zfsvfs, xattr);
if (do_atime)
atime_changed_cb(zfsvfs, atime);
nbmand_changed_cb(zfsvfs, nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printf("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val);
if (error != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if (error == 0 && val == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
}
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT3U(zfsvfs->z_root, !=, 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
/*
* Only use the name cache if we are looking for a
* name on a file system that does not require normalization
* or case folding. We can also look there if we happen to be
* on a non-normalizing, mixed sensitivity file system IF we
* are looking for the exact name (which is always the case on
* FreeBSD).
*/
zfsvfs->z_use_namecache = !zfsvfs->z_norm ||
((zfsvfs->z_case == ZFS_CASE_MIXED) &&
!(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER));
return (0);
}
taskq_t *zfsvfs_taskq;
static void
zfsvfs_task_unlinked_drain(void *context, int pending __unused)
{
zfs_unlinked_drain((zfsvfs_t *)context);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
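/* Snapshots (anything with '@' in the name) are always owned read-only. */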
/*
* XXX: Fix struct statfs so this isn't necessary!
*
* The 'osname' is used as the filesystem's special node, which means
* it must fit in statfs.f_mntfromname. Otherwise it can't be
* enumerated, so libzfs_mnttab_find() returns NULL, which causes
* 'zfs unmount' to think the filesystem is not mounted when it is.
*/
if (strlen(osname) >= MNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs,
&os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
TASK_INIT(&zfsvfs->z_unlinked_drain_task, 0,
zfsvfs_task_unlinked_drain, zfsvfs);
ZFS_TEARDOWN_INIT(zfsvfs);
ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
dmu_objset_incompatible_encryption_version(zfsvfs->z_os))
return (SET_ERROR(EROFS));
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
boolean_t readonly;
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read-only flag to
* allow replays to succeed.
*/
readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
if (readonly != 0) {
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
} else {
dsl_dir_t *dd;
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
(u_longlong_t)zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
boolean_t use_nc = zfsvfs->z_use_namecache;
zfsvfs->z_use_namecache = B_FALSE;
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
zfsvfs->z_use_namecache = use_nc;
}
}
/* restore readonly bit */
if (readonly != 0)
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
uint64_t recordsize, fsid_guid;
int error = 0;
zfsvfs_t *zfsvfs;
ASSERT3P(vfsp, !=, NULL);
ASSERT3P(osname, !=, NULL);
error = zfsvfs_create(osname, vfsp->mnt_flag & MNT_RDONLY, &zfsvfs);
if (error)
return (error);
zfsvfs->z_vfs = vfsp;
if ((error = dsl_prop_get_integer(osname,
"recordsize", &recordsize, NULL)))
goto out;
zfsvfs->z_vfs->vfs_bsize = SPA_MINBLOCKSIZE;
zfsvfs->z_vfs->mnt_stat.f_iosize = recordsize;
vfsp->vfs_data = zfsvfs;
vfsp->mnt_flag |= MNT_LOCAL;
vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
vfsp->mnt_kern_flag |= MNTK_EXTENDED_SHARED;
/*
* This can cause a loss of coherence between ARC and page cache
* on ZoF; it is unclear if the problem is in FreeBSD or ZoF.
*/
vfsp->mnt_kern_flag |= MNTK_NO_IOPF; /* vn_io_fault can be used */
vfsp->mnt_kern_flag |= MNTK_NOMSYNC;
vfsp->mnt_kern_flag |= MNTK_VMSETSIZE_BUG;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
vfsp->mnt_kern_flag |= MNTK_FPLOOKUP;
#endif
/*
* The fsid is 64 bits, composed of an 8-bit fs type, which
* separates our fsid from any other filesystem types, and a
* 56-bit objset unique ID. The objset unique ID is unique to
* all objsets open on this system, provided by unique_create().
* The 8-bit fs type must be put in the low bits of fsid[1]
* because that's where other Solaris filesystems put it.
*/
fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
ASSERT3U((fsid_guid & ~((1ULL << 56) - 1)), ==, 0);
vfsp->vfs_fsid.val[0] = fsid_guid;
vfsp->vfs_fsid.val[1] = ((fsid_guid >> 32) << 8) |
(vfsp->mnt_vfc->vfc_typenum & 0xFF);
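/*
 * The resulting layout: val[0] gets the low 32 bits of the objset ID,
 * while val[1] carries bits 32..55 of the ID in its upper 24 bits and
 * the 8-bit vfc_typenum in its low byte.
 */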
/*
* Set features for file system.
*/
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acl_type_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
vfs_mountedfrom(vfsp, osname);
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
out:
if (error) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
} else {
atomic_inc_32(&zfs_active_fs_count);
}
return (error);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
static int
getpoolname(const char *osname, char *poolname)
{
char *p;
p = strchr(osname, '/');
if (p == NULL) {
if (strlen(osname) >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strcpy(poolname, osname);
} else {
if (p - osname >= MAXNAMELEN)
return (ENAMETOOLONG);
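/*
 * strlcpy()'s size argument includes the terminating NUL, so this
 * copies exactly the pool component: the p - osname characters
 * before the '/'.
 */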
(void) strlcpy(poolname, osname, p - osname + 1);
}
return (0);
}
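/*
 * A leading '!' on the dataset name requests a checkpoint rewind when
 * the root pool is imported. The marker is stripped in place; the
 * memmove length includes the terminating NUL.
 */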
static void
fetch_osname_options(char *name, bool *checkpointrewind)
{
if (name[0] == '!') {
*checkpointrewind = true;
memmove(name, name + 1, strlen(name));
} else {
*checkpointrewind = false;
}
}
static int
zfs_mount(vfs_t *vfsp)
{
kthread_t *td = curthread;
vnode_t *mvp = vfsp->mnt_vnodecovered;
cred_t *cr = td->td_ucred;
char *osname;
int error = 0;
int canwrite;
bool checkpointrewind, isctlsnap = false;
if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
return (SET_ERROR(EINVAL));
/*
* If full-owner-access is enabled and delegated administration is
* turned on, we must set nosuid.
*/
if (zfs_super_owner &&
dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
secpolicy_fs_mount_clearopts(cr, vfsp);
}
fetch_osname_options(osname, &checkpointrewind);
isctlsnap = (mvp != NULL && zfsctl_is_node(mvp) &&
strchr(osname, '@') != NULL);
/*
* Check for mount privilege.
*
* If we don't have the privilege, then see if
* we have local permission to allow it.
*/
error = secpolicy_fs_mount(cr, mvp, vfsp);
if (error && isctlsnap) {
secpolicy_fs_mount_clearopts(cr, vfsp);
} else if (error) {
if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != 0)
goto out;
if (!(vfsp->vfs_flag & MS_REMOUNT)) {
vattr_t vattr;
/*
* Make sure the user is the owner of the mount point
* or has sufficient privileges.
*/
vattr.va_mask = AT_UID;
vn_lock(mvp, LK_SHARED | LK_RETRY);
if (VOP_GETATTR(mvp, &vattr, cr)) {
VOP_UNLOCK1(mvp);
goto out;
}
if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
VOP_UNLOCK1(mvp);
goto out;
}
VOP_UNLOCK1(mvp);
}
secpolicy_fs_mount_clearopts(cr, vfsp);
}
/*
* Refuse to mount a filesystem if we are in a local zone and the
* dataset is not visible.
*/
if (!INGLOBALZONE(curproc) &&
(!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
boolean_t mount_snapshot = B_FALSE;
/*
* Snapshots may be mounted in .zfs for unjailed datasets
* if allowed by the jail param zfs.mount_snapshot.
*/
if (isctlsnap) {
struct prison *pr;
struct zfs_jailparam *zjp;
pr = curthread->td_ucred->cr_prison;
mtx_lock(&pr->pr_mtx);
zjp = osd_jail_get(pr, zfs_jailparam_slot);
mtx_unlock(&pr->pr_mtx);
if (zjp && zjp->mount_snapshot)
mount_snapshot = B_TRUE;
}
if (!mount_snapshot) {
error = SET_ERROR(EPERM);
goto out;
}
}
vfsp->vfs_flag |= MNT_NFS4ACLS;
/*
* When doing a remount, we simply refresh our temporary properties
* according to the current VFS options.
*/
if (vfsp->vfs_flag & MS_REMOUNT) {
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* Refresh mount options with z_teardown_lock blocking I/O while
* the filesystem is in an inconsistent state.
* The lock also serializes this code with filesystem
* manipulations between entry to zfs_suspend_fs() and return
* from zfs_resume_fs().
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfs_unregister_callbacks(zfsvfs);
error = zfs_register_callbacks(vfsp);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
goto out;
}
/* Initial root mount: try hard to import the requested root pool. */
if ((vfsp->vfs_flag & MNT_ROOTFS) != 0 &&
(vfsp->vfs_flag & MNT_UPDATE) == 0) {
char pname[MAXNAMELEN];
error = getpoolname(osname, pname);
if (error == 0)
error = spa_import_rootpool(pname, checkpointrewind);
if (error)
goto out;
}
DROP_GIANT();
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
out:
return (error);
}
static int
zfs_statfs(vfs_t *vfsp, struct statfs *statp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int error;
statp->f_version = STATFS_VERSION;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
/*
* The underlying storage pool actually uses multiple block sizes.
* We report the fragsize as the smallest block size we support,
* and we report our blocksize as the filesystem's maximum blocksize.
*/
statp->f_bsize = SPA_MINBLOCKSIZE;
statp->f_iosize = zfsvfs->z_vfs->mnt_stat.f_iosize;
/*
* The following reports "total" blocks of various kinds in the
* file system, in terms of f_bsize - the
* "fragment" size.
*/
statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
statp->f_bfree = availbytes / statp->f_bsize;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, statp->f_bfree);
statp->f_files = statp->f_ffree + usedobjs;
/*
* We're a zfs filesystem.
*/
strlcpy(statp->f_fstypename, "zfs",
sizeof (statp->f_fstypename));
strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
sizeof (statp->f_mntfromname));
strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
sizeof (statp->f_mntonname));
statp->f_namemax = MAXNAMELEN - 1;
zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*vpp = ZTOV(rootzp);
zfs_exit(zfsvfs, FTAG);
if (error == 0) {
error = vn_lock(*vpp, flags);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
}
}
return (error);
}
/*
* Teardown the zfsvfs::z_os.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
dsl_dir_t *dd;
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but zreles run from the taskq
* may add the parents of dir-based xattrs to the taskq,
* so we want to wait for these.
*
* We can safely check z_all_znodes for being empty because the
* VFS has already blocked operations which add to it.
*/
int round = 0;
while (!list_is_empty(&zfsvfs->z_all_znodes)) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's vfsp as the parent
* filesystem and all of its snapshots have their vnode's
* v_vfsp set to the parent's filesystem's vfsp. Note,
* 'z_parent' is self referential for non-snapshots.
*/
#ifdef FREEBSD_NAMECACHE
#if __FreeBSD_version >= 1300117
cache_purgevfs(zfsvfs->z_parent->z_vfs);
#else
cache_purgevfs(zfsvfs->z_parent->z_vfs, true);
#endif
#endif
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed, then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no vops active, and any new vops will
* fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl != NULL) {
zfs_znode_dmu_fini(zp);
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
/*
* If we are unmounting, set the unmounted flag and let new vops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other vops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data
*/
if (!zfs_is_readonly(zfsvfs))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
dmu_objset_evict_dbufs(zfsvfs->z_os);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
kthread_t *td = curthread;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
objset_t *os;
cred_t *cr = td->td_ucred;
int ret;
ret = secpolicy_fs_unmount(cr, vfsp);
if (ret) {
if (dsl_deleg_access((char *)vfsp->vfs_resource,
ZFS_DELEG_PERM_MOUNT, cr))
return (ret);
}
/*
* Unmount any snapshots mounted under .zfs before unmounting the
* dataset itself.
*/
if (zfsvfs->z_ctldir != NULL) {
if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
return (ret);
}
if (fflag & MS_FORCE) {
/*
* Mark file system as unmounted before calling
* vflush(FORCECLOSE). This way we ensure no future vnops
* will be called and risk operating on DOOMED vnodes.
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* Flush all the files.
*/
ret = vflush(vfsp, 0, (fflag & MS_FORCE) ? FORCECLOSE : 0, td);
if (ret != 0)
return (ret);
while (taskqueue_cancel(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task, NULL) != 0)
taskqueue_drain(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task);
VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
/*
* We can now safely destroy the '.zfs' directory node.
*/
if (zfsvfs->z_ctldir != NULL)
zfsctl_destroy(zfsvfs);
zfs_freevfs(vfsp);
return (0);
}
static int
zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
int err;
/*
* zfs_zget() can't operate on virtual entries like .zfs/ or
* .zfs/snapshot/ directories, which is why we return EOPNOTSUPP.
* This will make NFS switch to LOOKUP instead of using VGET.
*/
if (ino == ZFSCTL_INO_ROOT || ino == ZFSCTL_INO_SNAPDIR ||
(zfsvfs->z_shares_dir != 0 && ino == zfsvfs->z_shares_dir))
return (EOPNOTSUPP);
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
err = zfs_zget(zfsvfs, ino, &zp);
if (err == 0 && zp->z_unlinked) {
vrele(ZTOV(zp));
err = EINVAL;
}
if (err == 0)
*vpp = ZTOV(zp);
zfs_exit(zfsvfs, FTAG);
if (err == 0) {
err = vn_lock(*vpp, flags);
if (err != 0)
vrele(*vpp);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
#if __FreeBSD_version >= 1300098
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors)
#else
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* If this is a regular file system, vfsp is the same as
* zfsvfs->z_parent->z_vfs, but if it is a snapshot,
* zfsvfs->z_parent->z_vfs represents the parent file system,
* which we have to use here, because only that file system
* has mnt_export configured.
*/
return (vfs_stdcheckexp(zfsvfs->z_parent->z_vfs, nam, extflagsp,
credanonp, numsecflavors, secflavors));
}
_Static_assert(sizeof (struct fid) >= SHORT_FID_LEN,
"struct fid smaller than SHORT_FID_LEN");
_Static_assert(sizeof (struct fid) >= LONG_FID_LEN,
"struct fid smaller than LONG_FID_LEN");
static int
zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
{
struct componentname cn;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
vnode_t *dvp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t setgen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*vpp = NULL;
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
/*
* On FreeBSD we can get the snapshot's mount point or its parent file
* system's mount point, depending on whether the snapshot is already mounted.
*/
if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
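/* Reassemble the fid's little-endian byte arrays into integers. */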
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
zfs_exit(zfsvfs, FTAG);
err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
if (err)
return (SET_ERROR(EINVAL));
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
}
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (fidp->fid_len == LONG_FID_LEN && setgen != 0) {
zfs_exit(zfsvfs, FTAG);
dprintf("snapdir fid: fid_gen (%llu) and setgen (%llu)\n",
(u_longlong_t)fid_gen, (u_longlong_t)setgen);
return (SET_ERROR(EINVAL));
}
/*
* A zero fid_gen means we are in .zfs or the .zfs/snapshot
* directory tree. If the object == zfsvfs->z_shares_dir, then
* we are in the .zfs/shares directory tree.
*/
if ((fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) ||
(zfsvfs->z_shares_dir != 0 && object == zfsvfs->z_shares_dir)) {
zfs_exit(zfsvfs, FTAG);
VERIFY0(zfsctl_root(zfsvfs, LK_SHARED, &dvp));
if (object == ZFSCTL_INO_SNAPDIR) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN | LOCKLEAF;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else if (object == zfsvfs->z_shares_dir) {
/*
* XXX This branch must not be taken;
* if it is, the lookup below will
* explode.
*/
cn.cn_nameptr = "shares";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else {
*vpp = dvp;
}
return (err);
}
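/*
 * At this point 'i' still holds sizeof (zfid->zf_gen) from the loop
 * above, so the mask covers exactly the generation bits that fit in
 * the fid.
 */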
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t)object,
(u_longlong_t)fid_gen,
(u_longlong_t)gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n",
(u_longlong_t)zp_gen, (u_longlong_t)fid_gen);
vrele(ZTOV(zp));
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
*vpp = ZTOV(zp);
zfs_exit(zfsvfs, FTAG);
err = vn_lock(*vpp, flags);
if (err == 0)
vnode_create_vobject(*vpp, zp->z_size, curthread);
else
*vpp = NULL;
return (err);
}
/*
* Block out VOPs and close zfsvfs_t::z_os
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
/*
* Attempt to re-establish all the active znodes with
* their dbufs. If a zfs_rezget() fails, then we'll let
* any potential callers discover that via zfs_enter_verify_zp
* when they try to use their znode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
(void) zfs_rezget(zp);
}
mutex_exit(&zfsvfs->z_znodes_lock);
bail:
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err) {
/*
* Since we couldn't set up the SA framework, try to force
* unmount this file system.
*/
if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0) {
vfs_ref(zfsvfs->z_vfs);
(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
}
}
return (err);
}
static void
zfs_freevfs(vfs_t *vfsp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
zfsvfs_free(zfsvfs);
atomic_dec_32(&zfs_active_fs_count);
}
#ifdef __i386__
static int desiredvnodes_backup;
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#endif
static void
zfs_vnodes_adjust(void)
{
#ifdef __i386__
int newdesiredvnodes;
desiredvnodes_backup = desiredvnodes;
/*
* We calculate newdesiredvnodes the same way it is done in
* vntblinit(). If it is equal to desiredvnodes, it means that
* it wasn't tuned by the administrator and we can tune it down.
*/
newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
vm_kmem_size / (5 * (sizeof (struct vm_object) +
sizeof (struct vnode))));
if (newdesiredvnodes == desiredvnodes)
desiredvnodes = (3 * newdesiredvnodes) / 4;
#endif
}
static void
zfs_vnodes_adjust_back(void)
{
#ifdef __i386__
desiredvnodes = desiredvnodes_backup;
#endif
}
#if __FreeBSD_version >= 1300139
static struct sx zfs_vnlru_lock;
static struct vnode *zfs_vnlru_marker;
#endif
static arc_prune_t *zfs_prune;
static void
zfs_prune_task(uint64_t nr_to_scan, void *arg __unused)
{
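/* The vnlru free routines take an int count, so clamp the 64-bit request. */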
if (nr_to_scan > INT_MAX)
nr_to_scan = INT_MAX;
#if __FreeBSD_version >= 1300139
sx_xlock(&zfs_vnlru_lock);
vnlru_free_vfsops(nr_to_scan, &zfs_vfsops, zfs_vnlru_marker);
sx_xunlock(&zfs_vnlru_lock);
#else
vnlru_free(nr_to_scan, &zfs_vfsops);
#endif
}
void
zfs_init(void)
{
printf("ZFS filesystem version: " ZPL_VERSION_STRING "\n");
/*
* Initialize .zfs directory structures
*/
zfsctl_init();
/*
* Initialize znode cache, vnode ops, etc...
*/
zfs_znode_init();
/*
* Reduce the number of vnodes. Originally the number of vnodes is
* calculated with UFS inodes in mind. We reduce it here, because it's
* too big for ZFS/i386.
*/
zfs_vnodes_adjust();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
#if __FreeBSD_version >= 1300139
zfs_vnlru_marker = vnlru_alloc_marker();
sx_init(&zfs_vnlru_lock, "zfs vnlru lock");
#endif
zfs_prune = arc_add_prune_callback(zfs_prune_task, NULL);
}
void
zfs_fini(void)
{
arc_remove_prune_callback(zfs_prune);
#if __FreeBSD_version >= 1300139
vnlru_free_marker(zfs_vnlru_marker);
sx_destroy(&zfs_vnlru_lock);
#endif
taskq_destroy(zfsvfs_taskq);
zfsctl_fini();
zfs_znode_fini();
zfs_vnodes_adjust_back();
}
int
zfs_busy(void)
{
return (zfs_active_fs_count != 0);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_vfs, 0);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %ju to %ju", (uintmax_t)zfsvfs->z_version,
(uintmax_t)newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true, we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT3U(dmu_objset_type(os), ==, DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_vfs != NULL &&
(zfvp->z_vfs->mnt_kern_flag & MNTK_UNMOUNT))
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
#ifdef _KERNEL
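/*
 * Rewrite f_mntfromname for a renamed dataset and all of its
 * descendants and snapshots, e.g. renaming "tank/a" to "tank/b" also
 * updates mounts of "tank/a/c" and "tank/a@snap".
 */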
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
char tmpbuf[MAXPATHLEN];
struct mount *mp;
char *fromname;
size_t oldlen;
oldlen = strlen(oldname);
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
fromname = mp->mnt_stat.f_mntfromname;
if (strcmp(fromname, oldname) == 0) {
(void) strlcpy(fromname, newname,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
if (strncmp(fromname, oldname, oldlen) == 0 &&
(fromname[oldlen] == '/' || fromname[oldlen] == '@')) {
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%s",
newname, fromname + oldlen);
(void) strlcpy(fromname, tmpbuf,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
}
mtx_unlock(&mountlist_mtx);
}
#endif
/*
* Find a prison with ZFS info.
* Return the ZFS info and the (locked) prison.
*/
static struct zfs_jailparam *
zfs_jailparam_find(struct prison *spr, struct prison **prp)
{
struct prison *pr;
struct zfs_jailparam *zjp;
for (pr = spr; ; pr = pr->pr_parent) {
mtx_lock(&pr->pr_mtx);
if (pr == &prison0) {
zjp = &zfs_jailparam0;
break;
}
zjp = osd_jail_get(pr, zfs_jailparam_slot);
if (zjp != NULL)
break;
mtx_unlock(&pr->pr_mtx);
}
*prp = pr;
return (zjp);
}
/*
* Ensure a prison has its own ZFS info. If zjpp is non-null, point it to the
* ZFS info and lock the prison.
*/
static void
zfs_jailparam_alloc(struct prison *pr, struct zfs_jailparam **zjpp)
{
struct prison *ppr;
struct zfs_jailparam *zjp, *nzjp;
void **rsv;
/* If this prison already has ZFS info, return that. */
zjp = zfs_jailparam_find(pr, &ppr);
if (ppr == pr)
goto done;
/*
* Allocate a new info record. Then check again, in case something
* changed during the allocation.
*/
mtx_unlock(&ppr->pr_mtx);
nzjp = malloc(sizeof (struct zfs_jailparam), M_PRISON, M_WAITOK);
rsv = osd_reserve(zfs_jailparam_slot);
zjp = zfs_jailparam_find(pr, &ppr);
if (ppr == pr) {
free(nzjp, M_PRISON);
osd_free_reserved(rsv);
goto done;
}
/* Inherit the initial values from the ancestor. */
mtx_lock(&pr->pr_mtx);
(void) osd_jail_set_reserved(pr, zfs_jailparam_slot, rsv, nzjp);
(void) memcpy(nzjp, zjp, sizeof (*zjp));
zjp = nzjp;
mtx_unlock(&ppr->pr_mtx);
done:
if (zjpp != NULL)
*zjpp = zjp;
else
mtx_unlock(&pr->pr_mtx);
}
/*
* Jail OSD methods for ZFS VFS info.
*/
static int
zfs_jailparam_create(void *obj, void *data)
{
struct prison *pr = obj;
struct vfsoptlist *opts = data;
int jsys;
if (vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys)) == 0 &&
jsys == JAIL_SYS_INHERIT)
return (0);
/*
* Inherit a prison's initial values from its parent
* (different from JAIL_SYS_INHERIT which also inherits changes).
*/
zfs_jailparam_alloc(pr, NULL);
return (0);
}
static int
zfs_jailparam_get(void *obj, void *data)
{
struct prison *ppr, *pr = obj;
struct vfsoptlist *opts = data;
struct zfs_jailparam *zjp;
int jsys, error;
zjp = zfs_jailparam_find(pr, &ppr);
jsys = (ppr == pr) ? JAIL_SYS_NEW : JAIL_SYS_INHERIT;
error = vfs_setopt(opts, "zfs", &jsys, sizeof (jsys));
if (error != 0 && error != ENOENT)
goto done;
if (jsys == JAIL_SYS_NEW) {
error = vfs_setopt(opts, "zfs.mount_snapshot",
&zjp->mount_snapshot, sizeof (zjp->mount_snapshot));
if (error != 0 && error != ENOENT)
goto done;
} else {
/*
* If this prison is inheriting its ZFS info, report
* empty/zero parameters.
*/
static int mount_snapshot = 0;
error = vfs_setopt(opts, "zfs.mount_snapshot",
&mount_snapshot, sizeof (mount_snapshot));
if (error != 0 && error != ENOENT)
goto done;
}
error = 0;
done:
mtx_unlock(&ppr->pr_mtx);
return (error);
}
static int
zfs_jailparam_set(void *obj, void *data)
{
struct prison *pr = obj;
struct prison *ppr;
struct vfsoptlist *opts = data;
int error, jsys, mount_snapshot;
/* Set the parameters, which should be correct. */
error = vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys));
if (error == ENOENT)
jsys = -1;
error = vfs_copyopt(opts, "zfs.mount_snapshot", &mount_snapshot,
sizeof (mount_snapshot));
if (error == ENOENT)
mount_snapshot = -1;
else
jsys = JAIL_SYS_NEW;
switch (jsys) {
case JAIL_SYS_NEW:
{
/* "zfs=new" or "zfs.*": the prison gets its own ZFS info. */
struct zfs_jailparam *zjp;
/*
* A child jail cannot have more permissions than its parent.
*/
if (pr->pr_parent != &prison0) {
zjp = zfs_jailparam_find(pr->pr_parent, &ppr);
mtx_unlock(&ppr->pr_mtx);
if (zjp->mount_snapshot < mount_snapshot) {
return (EPERM);
}
}
zfs_jailparam_alloc(pr, &zjp);
if (mount_snapshot != -1)
zjp->mount_snapshot = mount_snapshot;
mtx_unlock(&pr->pr_mtx);
break;
}
case JAIL_SYS_INHERIT:
/* "zfs=inherit": inherit the parent's ZFS info. */
mtx_lock(&pr->pr_mtx);
osd_jail_del(pr, zfs_jailparam_slot);
mtx_unlock(&pr->pr_mtx);
break;
case -1:
/*
* If the setting being changed is not ZFS-related,
* then do nothing.
*/
break;
}
return (0);
}
static int
zfs_jailparam_check(void *obj __unused, void *data)
{
struct vfsoptlist *opts = data;
int error, jsys, mount_snapshot;
/* Check that the parameters are correct. */
error = vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys));
if (error != ENOENT) {
if (error != 0)
return (error);
if (jsys != JAIL_SYS_NEW && jsys != JAIL_SYS_INHERIT)
return (EINVAL);
}
error = vfs_copyopt(opts, "zfs.mount_snapshot", &mount_snapshot,
sizeof (mount_snapshot));
if (error != ENOENT) {
if (error != 0)
return (error);
if (mount_snapshot != 0 && mount_snapshot != 1)
return (EINVAL);
}
return (0);
}
static void
zfs_jailparam_destroy(void *data)
{
free(data, M_PRISON);
}
static void
zfs_jailparam_sysinit(void *arg __unused)
{
struct prison *pr;
osd_method_t methods[PR_MAXMETHOD] = {
[PR_METHOD_CREATE] = zfs_jailparam_create,
[PR_METHOD_GET] = zfs_jailparam_get,
[PR_METHOD_SET] = zfs_jailparam_set,
[PR_METHOD_CHECK] = zfs_jailparam_check,
};
zfs_jailparam_slot = osd_jail_register(zfs_jailparam_destroy, methods);
/* Copy the defaults to any existing prisons. */
sx_slock(&allprison_lock);
TAILQ_FOREACH(pr, &allprison, pr_list)
zfs_jailparam_alloc(pr, NULL);
sx_sunlock(&allprison_lock);
}
static void
zfs_jailparam_sysuninit(void *arg __unused)
{
osd_jail_deregister(zfs_jailparam_slot);
}
SYSINIT(zfs_jailparam_sysinit, SI_SUB_DRIVERS, SI_ORDER_ANY,
zfs_jailparam_sysinit, NULL);
SYSUNINIT(zfs_jailparam_sysuninit, SI_SUB_DRIVERS, SI_ORDER_ANY,
zfs_jailparam_sysuninit, NULL);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
index 024a931d7816..b08916b317f8 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
@@ -1,1832 +1,1837 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
/*
* This file is responsible for handling all of the details of generating
* encryption parameters and performing encryption and authentication.
*
* BLOCK ENCRYPTION PARAMETERS:
* Encryption / Authentication Algorithm Suite (crypt):
* The encryption algorithm, mode, and key length we are going to use. We
* currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
* keys. All authentication is currently done with SHA512-HMAC.
*
* Plaintext:
* The unencrypted data that we want to encrypt.
*
* Initialization Vector (IV):
* An initialization vector for the encryption algorithms. This is used to
* "tweak" the encryption algorithms so that two blocks of the same data are
* encrypted into different ciphertext outputs, thus obfuscating block patterns.
* The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
* never reused with the same encryption key. This value is stored unencrypted
* and must simply be provided to the decryption function. We use a 96 bit IV
* (as recommended by NIST) for all block encryption. For non-dedup blocks we
* derive the IV randomly. The first 64 bits of the IV are stored in the second
* word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
* blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
* of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
* of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
* level 0 blocks is the number of allocated dnodes in that block. The on-disk
* format supports at most 2^15 slots per L0 dnode block, because the maximum
* block size is 16MB (2^24). In either case, for level 0 blocks this number
* will still be smaller than UINT32_MAX so it is safe to store the IV in the
* top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
* for the dnode code.
*
* Master key:
* This is the most important secret data of an encrypted dataset. It is used
* along with the salt to generate the actual encryption keys via HKDF. We
* do not use the master key to directly encrypt any data because there are
* theoretical limits on how much data can actually be safely encrypted with
* any encryption mode. The master key is stored encrypted on disk with the
* user's wrapping key. Its length is determined by the encryption algorithm.
* For details on how this is stored, see the block comment in dsl_crypt.c.
*
* Salt:
* Used as an input to the HKDF function, along with the master key. We use a
* 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
* can be used for encrypting many blocks, so we cache the current salt and the
* associated derived key in zio_crypt_t so we do not need to derive it again
* needlessly.
*
* Encryption Key:
* A secret binary key, generated from an HKDF function used to encrypt and
* decrypt data.
*
* Message Authentication Code (MAC):
* The MAC is an output of authenticated encryption modes such as AES-GCM and
* AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
* data on disk and return garbage to the application. Effectively, it is a
* checksum that cannot be reproduced by an attacker. We store the MAC in the
* second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
* regular checksum of the ciphertext which can be used for scrubbing.
*
* OBJECT AUTHENTICATION:
* Some object types, such as DMU_OT_MASTER_NODE, cannot be encrypted because
* they contain some info that always needs to be readable. To prevent this
* data from being altered, we authenticate this data using SHA512-HMAC. This
* will produce a MAC (similar to the one produced via encryption) which can
* be used to verify the object was not modified. HMACs do not require key
* rotation or IVs, so we can keep up to the full 3 copies of authenticated
* data.
*
* ZIL ENCRYPTION:
* ZIL blocks have their bp written to disk ahead of the associated data, so we
* cannot store the MAC there as we normally do. For these blocks the MAC is
* stored in the embedded checksum within the zil_chain_t header. The salt and
* IV are generated for the block on bp allocation instead of at encryption
* time. In addition, ZIL blocks have some pieces that must be left in plaintext
* for claiming even though all of the sensitive user data still needs to be
* encrypted. The function zio_crypt_init_uios_zil() handles parsing which
* pieces of the block need to be encrypted. All data that is not encrypted is
* authenticated using the AAD mechanisms that the supported encryption modes
* provide for. In order to preserve the semantics of the ZIL for encrypted
* datasets, the ZIL is not protected at the objset level as described below.
*
* DNODE ENCRYPTION:
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
* which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
* Up to this point, everything we have encrypted and authenticated has been
* at level 0 (or -2 for the ZIL). If we did not do any further work the
* on-disk format would be susceptible to attacks that deleted or rearranged
* the order of level 0 blocks. Ideally, the cleanest solution would be to
* maintain a tree of authentication MACs going up the bp tree. However, this
* presents a problem for raw sends. Send files do not send information about
* indirect blocks so there would be no convenient way to transfer the MACs and
* they cannot be recalculated on the receive side without the master key which
* would defeat one of the purposes of raw sends in the first place. Instead,
* for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
* from the level below. We also include some portable fields from blk_prop such
* as the lsize and compression algorithm to prevent the data from being
* misinterpreted.
*
* At the objset level, we maintain 2 separate 256 bit MACs in the
* objset_phys_t. The first one is "portable" and is the logical root of the
* MAC tree maintained in the metadnode's bps. The second is "local" and is
* used as the root MAC for the user accounting objects, which are also not
* transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
* of the send file. The useraccounting code ensures that the useraccounting
* info is not present upon a receive, so the local MAC can simply be cleared
* out at that time. For more info about objset_phys_t authentication, see
* zio_crypt_do_objset_hmacs().
*
* CONSIDERATIONS FOR DEDUP:
* In order for dedup to work, blocks that we want to dedup with one another
* need to use the same IV and encryption key, so that they will have the same
* ciphertext. Normally, one should never reuse an IV with the same encryption
* key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
* blocks. In this case, however, since we are using the same plaintext as
* well, all that we end up with is a duplicate of the original ciphertext we
* already had. As a result, an attacker with read access to the raw disk will
* be able to tell which blocks are the same but this information is given away
* by dedup anyway. In order to get the same IVs and encryption keys for
* equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
* here so that a reproducible checksum of the plaintext is never available to
* the attacker. The HMAC key is kept alongside the master key, encrypted on
* disk. The first 64 bits of the HMAC are used in place of the random salt, and
* the next 96 bits are used as the IV. As a result of this mechanism, dedup
* will only work within a clone family since encrypted dedup requires use of
* the same master and HMAC keys.
*/
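/*
 * A minimal sketch (hypothetical helper, not compiled in) of the digest
 * split described above, assuming ZIO_DATA_SALT_LEN == 8 and
 * ZIO_DATA_IV_LEN == 12; the real work is done by
 * zio_crypt_generate_iv_salt_dedup() below.
 */
#if 0
static void
dedup_salt_iv_from_digest(const uint8_t digest[SHA512_DIGEST_LENGTH],
    uint8_t salt[ZIO_DATA_SALT_LEN], uint8_t iv[ZIO_DATA_IV_LEN])
{
	/* the first 64 bits of the HMAC stand in for the random salt */
	memcpy(salt, digest, ZIO_DATA_SALT_LEN);
	/* the next 96 bits become the IV */
	memcpy(iv, digest + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
}
#endif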
/*
* After encrypting many blocks with the same key we may start to run up
* against the theoretical limits of how much data can securely be encrypted
* with a single key using the supported encryption modes. The most obvious
* limitation is that our risk of generating 2 equivalent 96 bit IVs increases
* the more IVs we generate (and reusing an IV is strictly forbidden by both
* GCM and CCM modes).
* This risk actually grows surprisingly quickly over time according to the
* Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
* generated n IVs with a cryptographically secure RNG, the approximate
* probability p(n) of a collision is given as:
*
* p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
*
* [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
*
* Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion,
* we must not write more than 398,065,730 blocks with the same encryption key.
* Therefore, we rotate our keys after 400,000,000 blocks have been written by
* generating a new random 64 bit salt for our HKDF encryption key generation
* function.
*/
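/*
 * As a quick sanity check of that bound, for small p the formula above
 * reduces to p(n) ~= n*(n-1) / (2 * 2^96) ~= n^2 / 2^97. With
 * n = 398,065,730 we get n^2 ~= 1.585e17 and 2^97 ~= 1.585e29, so
 * p(n) ~= 1.0e-12, the 1-in-a-trillion target. Rounding the rotation
 * threshold up to 400,000,000 stays at the same order of magnitude.
 */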
#define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000
#define ZFS_CURRENT_MAX_SALT_USES \
(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{"", ZC_TYPE_NONE, 0, "inherit"},
{"", ZC_TYPE_NONE, 0, "on"},
{"", ZC_TYPE_NONE, 0, "off"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
static void
zio_crypt_key_destroy_early(zio_crypt_key_t *key)
{
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
memset(&key->zk_session, 0, sizeof (key->zk_session));
/* zero out sensitive data */
memset(key, 0, sizeof (zio_crypt_key_t));
}
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
freebsd_crypt_freesession(&key->zk_session);
zio_crypt_key_destroy_early(key);
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
int ret;
crypto_mechanism_t mech __unused;
uint_t keydata_len;
const zio_crypt_info_t *ci = NULL;
ASSERT3P(key, !=, NULL);
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
keydata_len = zio_crypt_table[crypt].ci_keylen;
memset(key, 0, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_master_keydata, keydata_len);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for the ICP */
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ret = freebsd_crypt_newsession(&key->zk_session, ci,
&key->zk_current_key);
if (ret)
goto error;
key->zk_crypt = crypt;
key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
int ret = 0;
uint8_t salt[ZIO_DATA_SALT_LEN];
crypto_mechanism_t mech __unused;
uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
/* generate a new salt */
ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
rw_enter(&key->zk_salt_lock, RW_WRITER);
/* someone beat us to the salt rotation, just unlock and return */
if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
goto out_unlock;
/* derive the current key from the master key and the new salt */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
if (ret != 0)
goto out_unlock;
/* assign the salt and reset the usage count */
memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
freebsd_crypt_freesession(&key->zk_session);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[key->zk_crypt], &key->zk_current_key);
if (ret != 0)
goto out_unlock;
rw_exit(&key->zk_salt_lock);
return (0);
out_unlock:
rw_exit(&key->zk_salt_lock);
error:
return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
int ret;
boolean_t salt_change;
rw_enter(&key->zk_salt_lock, RW_READER);
memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
rw_exit(&key->zk_salt_lock);
if (salt_change) {
ret = zio_crypt_key_change_salt(key);
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
void *failed_decrypt_buf;
int failed_decrypt_size;
/*
* This function handles all encryption and decryption in zfs. When
* encrypting it expects puio to reference the plaintext and cuio to
* reference the ciphertext. cuio must have enough space for the
* ciphertext + room for a MAC. datalen should be the length of the
* plaintext / ciphertext alone.
*/
/*
* The implementation for FreeBSD's OpenCrypto.
*
* The big difference between ICP and FOC is that FOC uses a single
* buffer for input and output. This means that (for AES-GCM, the
* only one supported right now) the source must be copied into the
* destination, and the destination must have the AAD, and the tag/MAC,
* already associated with it. (Both implementations can use a uio.)
*
* Since the auth data is part of the iovec array, all we need to know
* is the length: 0 means there's no AAD.
*
*/
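/*
 * For reference, the single-buffer uios built by the key wrap/unwrap,
 * ZIL, and dnode paths below all share this layout (a sketch; the
 * "normal" data path simply omits the AAD entry):
 *
 * iovecs[0] AAD (auth_len bytes)
 * iovecs[1..n-2] plaintext/ciphertext, transformed in place
 * iovecs[n-1] MAC/tag
 */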
static int
zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
zfs_uio_t *uio, uint_t auth_len)
{
const zio_crypt_info_t *ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
int ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
datalen, auth_len);
if (ret != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Returning error %s\n",
__FUNCTION__, __LINE__, encrypt ? "EIO" : "ECKSUM");
#endif
ret = SET_ERROR(encrypt ? EIO : ECKSUM);
}
return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
zfs_uio_init(&cuio, &cuio_s);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */
ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
if (ret != 0)
goto error;
/*
* Since we only support one buffer, we need to copy
* the plain text (source) to the cipher buffer (dest).
* We set iovecs[0] -- the authentication data -- below.
*/
memcpy(keydata_out, key->zk_master_keydata, keydata_len);
memcpy(hmac_keydata_out, key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = keydata_out;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = hmac_keydata_out;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
/*
* Although we don't support writing to the old format, we do
* support rewrapping the key so that the user can move and
* quarantine datasets on the old format.
*/
if (key->zk_version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(key->zk_guid);
} else {
ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(key->zk_guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(key->zk_version);
}
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
return (0);
error:
return (ret);
}
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
uint8_t *mac, zio_crypt_key_t *key)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
void *src, *dst;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
keydata_len = zio_crypt_table[crypt].ci_keylen;
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
zfs_uio_init(&cuio, &cuio_s);
/*
* Since we only support one buffer, we need to copy
* the encrypted buffer (source) to the plain buffer
* (dest). We set iovecs[0] -- the authentication data --
* below.
*/
dst = key->zk_master_keydata;
src = keydata;
memcpy(dst, src, keydata_len);
dst = key->zk_hmac_keydata;
src = hmac_keydata;
memcpy(dst, src, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = key->zk_master_keydata;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = key->zk_hmac_keydata;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
if (version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(guid);
} else {
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(version);
}
enc_len = keydata_len + SHA512_HMAC_KEYLEN;
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
/* generate a fresh salt */
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for ICP */
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[crypt], &key->zk_current_key);
if (ret != 0)
goto error;
key->zk_crypt = crypt;
key->zk_version = version;
key->zk_guid = guid;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
int ret;
/* randomly generate the IV */
ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
if (ret != 0)
goto error;
return (0);
error:
memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
uint8_t *digestbuf, uint_t digestlen)
{
uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
crypto_mac(&key->zk_hmac_key, data, datalen,
raw_digestbuf, SHA512_DIGEST_LENGTH);
memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
ret = zio_crypt_do_hmac(key, data, datalen,
digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
/*
* The following functions are used to encode and decode encryption parameters
* into blkptr_t and zil_header_t. The ICP wants to use these parameters as
* byte strings, which normally means that these strings would not need to deal
* with byteswapping at all. However, both blkptr_t and zil_header_t may be
* byteswapped by lower layers and so we must "undo" that byteswap here upon
* decoding and encoding in a non-native byteorder. These functions require
* that the byteorder bit is correct before being called.
*/
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_PROTECTED(bp));
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
/*
* The ZIL MAC is embedded in the block it protects, which will
* not have been byteswapped by the time this function has been called.
* As a result, we don't need to worry about byteswapping the MAC.
*/
const zil_chain_t *zilc = data;
memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
/*
* This routine takes a block of dnodes (src_abd) and copies only the bonus
* buffers to the same offsets in the dst buffer. datalen should be the size
* of both the src_abd and the dst buffer (not just the length of the bonus
* buffers).
*/
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
uint_t i, max_dnp = datalen >> DNODE_SHIFT;
uint8_t *src;
dnode_phys_t *dnp, *sdnp, *ddnp;
src = abd_borrow_buf_copy(src_abd, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
abd_return_buf(src_abd, src, datalen);
}
/*
* This function decides which fields from blk_prop are included in
* the various on-disk MACs.
*/
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
int avoidlint = SPA_MINBLOCKSIZE;
/*
* Version 0 did not properly zero out all non-portable fields
* as it should have done. We maintain this code so that we can
* do read-only imports of pools on this version.
*/
if (version == 0) {
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
BP_SET_PSIZE(bp, avoidlint);
return;
}
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
/*
* The hole_birth feature might set these fields even if this bp
* is a hole. We zero them out here to guarantee that raw sends
* will function with or without the feature.
*/
if (BP_IS_HOLE(bp)) {
bp->blk_prop = 0ULL;
return;
}
/*
* At L0 we want to verify these fields to ensure that data blocks
* can not be reinterpreted. For instance, we do not want an attacker
* to trick us into returning raw lz4 compressed data to the user
* by modifying the compression bits. At higher levels, we cannot
* enforce this policy since raw sends do not convey any information
* about indirect blocks, so these values might be different on the
* receive side. Fortunately, this does not open any new attack
* vectors, since any alterations that can be made to a higher level
* bp must still verify the correct order of the layer below it.
*/
if (BP_GET_LEVEL(bp) != 0) {
BP_SET_BYTEORDER(bp, 0);
BP_SET_COMPRESS(bp, 0);
/*
* psize cannot be set to zero or it will trigger
* asserts, but the value doesn't really matter as
* long as it is constant.
*/
BP_SET_PSIZE(bp, avoidlint);
}
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
blkptr_auth_buf_t *bab, uint_t *bab_len)
{
blkptr_t tmpbp = *bp;
if (should_bswap)
byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
ASSERT0(BP_IS_EMBEDDED(&tmpbp));
zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
/*
* We always MAC blk_prop in LE to ensure portability. This
* must be done after decoding the mac, since the endianness
* will get zero'd out here.
*/
zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
bab->bab_prop = LE_64(tmpbp.blk_prop);
bab->bab_pad = 0ULL;
/* version 0 did not include the padding */
*bab_len = sizeof (blkptr_auth_buf_t);
if (version == 0)
*bab_len -= sizeof (uint64_t);
}
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
crypto_mac_update(ctx, &bab, bab_len);
return (0);
}
static void
zio_crypt_bp_do_indirect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
dnode_phys_t *adnp;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
/* authenticate the core dnode (masking out non-portable bits) */
memcpy(tmp_dncore, dnp, sizeof (tmp_dncore));
adnp = (dnode_phys_t *)tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
adnp->dn_used = BSWAP_64(adnp->dn_used);
}
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
crypto_mac_update(ctx, adnp, sizeof (tmp_dncore));
for (i = 0; i < dnp->dn_nblkptr; i++) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, &dnp->dn_blkptr[i]);
if (ret != 0)
goto error;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, DN_SPILL_BLKPTR(dnp));
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* objset_phys_t blocks introduce a number of exceptions to the normal
* authentication process. objset_phys_t's contain 2 separate HMACS for
* protecting the integrity of their data. The portable_mac protects the
* metadnode. This MAC can be sent with a raw send and protects against
* reordering of data within the metadnode. The local_mac protects the user
* accounting objects which are not sent from one system to another.
*
* In addition, objset blocks are the only blocks that can be modified and
* written to disk without the key loaded under certain circumstances. During
* zil_claim() we need to be able to update the zil_header_t to complete
* claiming log blocks and during raw receives we need to write out the
* portable_mac from the send file. Both of these actions are possible
* because these fields are not protected by either MAC so neither one will
* need to modify the MACs without the key. However, when the modified blocks
* are written out they will be byteswapped into the host machine's native
* endianness which will modify fields protected by the MAC. As a result, MAC
* calculation for objset blocks works slightly differently from other block
* types. Where other block types MAC the data in whatever endianness is
* written to disk, objset blocks always MAC the little endian version of their
* values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
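/*
 * In summary, the code below computes (informally):
 *
 * portable_mac = HMAC(os_type, portable os_flags, metadnode)
 * local_mac = HMAC(non-portable os_flags, userused dnode,
 * groupused dnode[, projectused dnode])
 *
 * with the local MAC zeroed out instead when user accounting has not
 * been completed or the accounting dnodes are absent.
 */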
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
int ret;
struct hmac_ctx hash_ctx;
struct hmac_ctx *ctx = &hash_ctx;
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* calculate the portable MAC from the portable fields and metadnode */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the os_type */
intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in the portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in fields from the metadnode */
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_meta_dnode);
if (ret)
goto error;
crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* This is necessary here as we check next whether
* OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
* decide if the local_mac should be zeroed out. That flag will always
* be set by dmu_objset_id_quota_upgrade_cb() and
* dmu_objset_userspace_upgrade_cb() if useraccounting has been
* completed.
*/
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
boolean_t uacct_incomplete =
!(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE);
/*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
if (uacct_incomplete ||
(datalen >= OBJSET_PHYS_SIZE_V3 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
/* calculate the local MAC from the userused and groupused dnodes */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the non-portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* XXX check dnode type ... */
/* add in fields from the user accounting dnodes */
if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_userused_dnode);
if (ret)
goto error;
}
if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_groupused_dnode);
if (ret)
goto error;
}
if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
datalen >= OBJSET_PHYS_SIZE_V3) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_projectused_dnode);
if (ret)
goto error;
}
crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
static void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
if (GET_UIO_STRUCT(uio)->uio_iov)
kmem_free(GET_UIO_STRUCT(uio)->uio_iov,
zfs_uio_iovcnt(uio) * sizeof (iovec_t));
}
/*
* This function parses an uncompressed indirect block and returns a checksum
* of all the portable fields from all of the contained bps. The portable
* fields are the MAC and all of the fields from blk_prop except for the dedup,
* checksum, and psize bits. For an explanation of the purpose of this, see
* the comment block on object set authentication.
*/
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
for (i = 0, bp = buf; i < epb; i++, bp++) {
zio_crypt_bp_do_indirect_checksum_updates(&ctx, version,
byteswap, bp);
}
SHA2Final(digestbuf, &ctx);
if (generate) {
memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
#endif
return (SET_ERROR(ECKSUM));
}
return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
/*
* Unfortunately, callers of this function will not always have
* easy access to the on-disk format version. This info is
* normally found in the DSL Crypto Key, but the checksum-of-MACs
* is expected to be verifiable even when the key isn't loaded.
* Here, instead of doing a ZAP lookup for the version for each
* zio, we simply try both existing formats.
*/
ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
if (ret == ECKSUM) {
ASSERT(!generate);
ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
buf, datalen, 0, byteswap, cksum);
}
return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
void *buf;
buf = abd_borrow_buf_copy(abd, datalen);
ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
byteswap, cksum);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting ZIL blocks.
* We do not check for the older ZIL chain because the encryption feature
* was not available before the newer ZIL chain was introduced. The goal
* here is to encrypt everything except the blkptr_t of a lr_write_t and
* the zil_chain_t header. Everything that is not encrypted is authenticated.
*/
/*
* The OpenCrypto used in FreeBSD does not use separate source and
* destination buffers; instead, the same buffer is used. Further, to
* accommodate some of the drivers, the authbuf needs to be logically before
* the data. This means that we need to copy the source to the destination,
* and set up an extra iovec_t at the beginning to handle the authbuf.
* It also means we'll only return one zfs_uio_t.
*/
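/*
 * As a sketch of what the routine below produces for a TX_WRITE record
 * of exactly sizeof (lr_write_t):
 *
 * [ lr_t header ] copied to dst and added to the AAD
 * [ middle of lr_write_t ] encrypted in place (its own iovec)
 * [ trailing blkptr_t ] copied to dst and added to the AAD
 *
 * Records larger than sizeof (lr_write_t) get a second encrypted iovec
 * for the data that follows the record.
 */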
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
(void) puio;
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
iovec_t *dst_iovecs;
zil_chain_t *zilc;
lr_t *lr;
- uint64_t txtype, lr_len;
+ uint64_t txtype, lr_len, nused;
uint_t crypt_len, nr_iovecs, vec;
uint_t aad_len = 0, total_len = 0;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
memcpy(dst, src, datalen);
/* Find the start and end record of the log block. */
zilc = (zil_chain_t *)src;
slrp = src + sizeof (zil_chain_t);
aadp = aadbuf;
- blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
+ nused = ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
+ ASSERT3U(nused, >=, sizeof (zil_chain_t));
+ ASSERT3U(nused, <=, datalen);
+ blkend = src + nused;
/*
* Calculate the number of encrypted iovecs we will need.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (; slrp < blkend; slrp += lr_len) {
lr = (lr_t *)slrp;
if (byteswap) {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
} else {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
}
+ ASSERT3U(lr_len, >=, sizeof (lr_t));
+ ASSERT3U(lr_len, <=, blkend - slrp);
nr_iovecs++;
if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
nr_iovecs++;
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Copy the plain zil header over and authenticate everything except
* the checksum that will store our MAC. If we are writing the data
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
slrp = src + sizeof (zil_chain_t);
dlrp = dst + sizeof (zil_chain_t);
/*
* Loop over records again, filling in iovecs.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
/* copy the common lr_t */
memcpy(dlrp, slrp, sizeof (lr_t));
memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp, if it exists. If the bp does exist, we want
* to authenticate it.
*/
if (txtype == TX_WRITE) {
crypt_len = sizeof (lr_write_t) -
sizeof (lr_t) - sizeof (blkptr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
memcpy(aadp,
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
vec++;
total_len += crypt_len;
if (lr_len != sizeof (lr_write_t)) {
crypt_len = lr_len - sizeof (lr_write_t);
dst_iovecs[vec].iov_base = (char *)
dlrp + sizeof (lr_write_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
} else if (txtype == TX_CLONE_RANGE) {
const size_t o = offsetof(lr_clone_range_t, lr_nbps);
crypt_len = o - sizeof (lr_t);
dst_iovecs[vec].iov_base = (char *)dlrp + sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bps now since they will not be encrypted */
memcpy(dlrp + o, slrp + o, lr_len - o);
memcpy(aadp, slrp + o, lr_len - o);
aadp += lr_len - o;
aad_len += lr_len - o;
vec++;
total_len += crypt_len;
} else {
crypt_len = lr_len - sizeof (lr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = NULL;
dst_iovecs[vec].iov_len = 0;
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
/*
* Special case handling routine for encrypting / decrypting dnode blocks.
*/
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt)
{
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *aadp;
dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
iovec_t *dst_iovecs;
uint_t nr_iovecs, crypt_len, vec;
uint_t aad_len = 0, total_len = 0;
uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
memcpy(dst, src, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
aadp = aadbuf;
/*
* Count the number of iovecs we will need to do the encryption by
* counting the number of bonus buffers that need to be encrypted.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
/*
* This block may still be byteswapped. However, all of the
* values we use are either uint8_t's (for which byteswapping
* is a noop) or a "!= 0" check, which will work regardless
* of whether or not we byteswap.
*/
if (sdnp[i].dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
sdnp[i].dn_bonuslen != 0) {
nr_iovecs++;
}
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Iterate through the dnodes again, this time filling in the uios
* we allocated earlier. We also concatenate any data we want to
* authenticate onto aadbuf.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
memcpy(&ddnp[i], dnp,
(uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
/*
* Handle authenticated data. We authenticate everything in
* the dnode that can be brought over when we do a raw send.
* This includes all of the core fields as well as the MACs
* stored in the bp checksums and all of the portable bits
* from blk_prop. We include the dnode padding here in case it
* ever gets used in the future. Some dn_flags bits and dn_used are
* not portable, so we mask those values out of the
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
aadp += crypt_len;
aad_len += crypt_len;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, &dnp->dn_blkptr[j]);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, DN_SPILL_BLKPTR(dnp));
}
/*
* If this bonus buffer needs to be encrypted, we prepare an
* iovec_t. The encryption / decryption functions will fill
* this in for us with the encrypted or decrypted data.
* Otherwise we add the bonus buffer to the authenticated
* data buffer and copy it over to the destination. The
* encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
* we can guarantee alignment with the AES block size
* (128 bits).
*/
crypt_len = DN_MAX_BONUS_LEN(dnp);
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
dst_iovecs[vec].iov_base = DN_BONUS(&ddnp[i]);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
} else {
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = NULL;
dst_iovecs[vec].iov_len = 0;
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio,
uint_t *enc_len)
{
(void) puio;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
void *src, *dst;
cipher_iovecs = kmem_zalloc(nr_cipher * sizeof (iovec_t),
KM_SLEEP);
if (!cipher_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
memcpy(dst, src, datalen);
cipher_iovecs[0].iov_base = dst;
cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen;
GET_UIO_STRUCT(out_uio)->uio_iov = cipher_iovecs;
zfs_uio_iovcnt(out_uio) = nr_cipher;
return (0);
error:
if (plain_iovecs != NULL)
kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
if (cipher_iovecs != NULL)
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0;
GET_UIO_STRUCT(out_uio)->uio_iov = NULL;
zfs_uio_iovcnt(out_uio) = 0;
return (ret);
}
/*
* This function builds up the plaintext (puio) and ciphertext (cuio) uios so
* that they can be used for encryption and decryption by zio_do_crypt_uio().
* Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
* requiring special handling to parse out pieces that are to be encrypted. The
* authbuf is used by these special cases to store additional authenticated
* data (AAD) for the encryption modes.
*/
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
iovec_t *mac_iov;
ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
/* route to handler */
switch (ot) {
case DMU_OT_INTENT_LOG:
ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
no_crypt);
break;
case DMU_OT_DNODE:
ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
auth_len, no_crypt);
break;
default:
ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
datalen, puio, cuio, enc_len);
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
break;
}
if (ret != 0)
goto error;
/* populate the uios */
zfs_uio_segflg(cuio) = UIO_SYSSPACE;
mac_iov = (iovec_t *)
    &GET_UIO_STRUCT(cuio)->uio_iov[zfs_uio_iovcnt(cuio) - 1];
mac_iov->iov_base = (void *)mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN;
return (0);
error:
return (ret);
}
/*
* Primary encryption / decryption entrypoint for zio data.
*/
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
boolean_t *no_crypt)
{
int ret;
boolean_t locked = B_FALSE;
uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len;
zfs_uio_t puio, cuio;
struct uio puio_s, cuio_s;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL;
freebsd_crypt_session_t *tmpl = NULL;
uint8_t *authbuf = NULL;
zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
memset(GET_UIO_STRUCT(&puio), 0, sizeof (struct uio));
memset(GET_UIO_STRUCT(&cuio), 0, sizeof (struct uio));
#ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
__FUNCTION__,
encrypt ? "encrypt" : "decrypt",
key, salt, ot, iv, mac, datalen,
byteswap ? "byteswap" : "native_endian", plainbuf,
cipherbuf, no_crypt);
printf("\tkey = {");
for (int i = 0; i < key->zk_current_key.ck_length/8; i++)
printf("%02x ", ((uint8_t *)key->zk_current_key.ck_data)[i]);
printf("}\n");
#endif
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
&authbuf, &auth_len, no_crypt);
if (ret != 0)
return (ret);
/*
* If the needed key is the current one, just use it. Otherwise we
* need to generate a temporary one from the given salt + master key.
* If we are encrypting, we must return a copy of the current salt
* so that it can be stored in the blkptr_t.
*/
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = &key->zk_session;
} else {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
if (ret != 0)
goto error;
tmp_ckey.ck_data = enc_keydata;
tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;
}
/* perform the encryption / decryption */
ret = zio_do_crypt_uio_opencrypto(encrypt, tmpl, key->zk_crypt,
ckey, iv, enc_len, &cuio, auth_len);
if (ret != 0)
goto error;
if (locked) {
rw_exit(&key->zk_salt_lock);
}
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (0);
error:
if (!encrypt) {
if (failed_decrypt_buf != NULL)
kmem_free(failed_decrypt_buf, failed_decrypt_size);
failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
failed_decrypt_size = datalen;
memcpy(failed_decrypt_buf, cipherbuf, datalen);
}
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (SET_ERROR(ret));
}
/*
* Simple wrapper around zio_do_crypt_data() to work with abd's instead of
* linear buffers.
*/
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
int ret;
void *ptmp, *ctmp;
if (encrypt) {
ptmp = abd_borrow_buf_copy(pabd, datalen);
ctmp = abd_borrow_buf(cabd, datalen);
} else {
ptmp = abd_borrow_buf(pabd, datalen);
ctmp = abd_borrow_buf_copy(cabd, datalen);
}
ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
datalen, ptmp, ctmp, no_crypt);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (0);
error:
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (SET_ERROR(ret));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* CSTYLED */
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
"can be used for generating encryption keys before it is rotated");
#endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 2520507b98aa..b6edac434dea 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -1,1626 +1,1627 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Portions Copyright 2010 Robert Milkowski
*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
/*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/zvol/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot. No user command needs to be
* run before opening and using a device.
*
* On FreeBSD ZVOLs are simply GEOM providers like any other storage device
* in the system, except when they're plain character devices (volmode=dev).
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/zil_impl.h>
#include <sys/filio.h>
#include <sys/freebsd_event.h>
#include <geom/geom.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include "zfs_namecheck.h"
#define ZVOL_DUMPSIZE "dumpsize"
#ifdef ZVOL_LOCK_DEBUG
#define ZVOL_RW_READER RW_WRITER
#define ZVOL_RW_READ_HELD RW_WRITE_HELD
#else
#define ZVOL_RW_READER RW_READER
#define ZVOL_RW_READ_HELD RW_READ_HELD
#endif
enum zvol_geom_state {
ZVOL_GEOM_UNINIT,
ZVOL_GEOM_STOPPED,
ZVOL_GEOM_RUNNING,
};
struct zvol_state_os {
#define zso_dev _zso_state._zso_dev
#define zso_geom _zso_state._zso_geom
union {
/* volmode=dev */
struct zvol_state_dev {
struct cdev *zsd_cdev;
uint64_t zsd_sync_cnt;
struct selinfo zsd_selinfo;
} _zso_dev;
/* volmode=geom */
struct zvol_state_geom {
struct g_provider *zsg_provider;
struct bio_queue_head zsg_queue;
struct mtx zsg_queue_mtx;
enum zvol_geom_state zsg_state;
} _zso_geom;
} _zso_state;
int zso_dying;
};
static uint32_t zvol_minors;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &zvol_volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither");
static boolean_t zpool_on_zvol = B_FALSE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
"Allow zpools to use zvols as vdevs (DANGEROUS)");
/*
* Toggle unmap functionality.
*/
boolean_t zvol_unmap_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
&zvol_unmap_enabled, 0, "Enable UNMAP functionality");
/*
* zvol maximum transfer in one DMU tx.
*/
int zvol_maxphys = DMU_MAX_ACCESS / 2;
static void zvol_ensure_zilog(zvol_state_t *zv);
static d_open_t zvol_cdev_open;
static d_close_t zvol_cdev_close;
static d_ioctl_t zvol_cdev_ioctl;
static d_read_t zvol_cdev_read;
static d_write_t zvol_cdev_write;
static d_strategy_t zvol_geom_bio_strategy;
static d_kqfilter_t zvol_cdev_kqfilter;
static struct cdevsw zvol_cdevsw = {
.d_name = "zvol",
.d_version = D_VERSION,
.d_flags = D_DISK | D_TRACKCLOSE,
.d_open = zvol_cdev_open,
.d_close = zvol_cdev_close,
.d_ioctl = zvol_cdev_ioctl,
.d_read = zvol_cdev_read,
.d_write = zvol_cdev_write,
.d_strategy = zvol_geom_bio_strategy,
.d_kqfilter = zvol_cdev_kqfilter,
};
static void zvol_filter_detach(struct knote *kn);
static int zvol_filter_vnode(struct knote *kn, long hint);
static struct filterops zvol_filterops_vnode = {
.f_isfd = 1,
.f_detach = zvol_filter_detach,
.f_event = zvol_filter_vnode,
};
extern uint_t zfs_geom_probe_vdev_key;
struct g_class zfs_zvol_class = {
.name = "ZFS::ZVOL",
.version = G_VERSION,
};
DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_worker(void *arg);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
/* static d_strategy_t zvol_geom_bio_strategy; (declared elsewhere) */
/*
* GEOM mode implementation
*/
static int
zvol_geom_open(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
int err = 0;
boolean_t drop_suspend = B_FALSE;
if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
/*
* If zfs_geom_probe_vdev_key is set, that means that zfs is
* attempting to probe geom providers while looking for a
* replacement for a missing VDEV. In this case, the
* spa_namespace_lock will not be held, but it is still illegal
* to use a zvol as a vdev. Deadlocks can result if another
* thread has spa_namespace_lock.
*/
return (SET_ERROR(EOPNOTSUPP));
}
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
/*
* Obtain a copy of private under zvol_state_lock to make sure either
* the result of zvol free code setting private to NULL is observed,
* or the zv is protected from being freed because of the positive
* zv_open_count.
*/
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_zv_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
kern_yield(PRI_USER);
goto retry;
} else {
drop_namespace = B_TRUE;
}
}
err = zvol_first_open(zv, !(flag & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
if (flag & O_EXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
zv->zv_open_count += count;
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
static int
zvol_geom_close(struct g_provider *pp, int flag, int count)
{
(void) flag;
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
int new_open_count;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* Make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
new_open_count = zv->zv_open_count - count;
if (new_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count = new_open_count;
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static void
zvol_geom_run(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_error_provider(pp, 0);
kproc_kthread_add(zvol_geom_worker, zv, &system_proc, NULL, 0, 0,
"zfskern", "zvol %s", pp->name + sizeof (ZVOL_DRIVER));
}
static void
zvol_geom_destroy(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_topology_assert();
mutex_enter(&zv->zv_state_lock);
VERIFY3S(zsg->zsg_state, ==, ZVOL_GEOM_RUNNING);
mutex_exit(&zv->zv_state_lock);
zsg->zsg_provider = NULL;
g_wither_geom(pp->geom, ENXIO);
}
void
zvol_wait_close(zvol_state_t *zv)
{
if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
return;
mutex_enter(&zv->zv_state_lock);
zv->zv_zso->zso_dying = B_TRUE;
if (zv->zv_open_count)
msleep(zv, &zv->zv_state_lock,
PRIBIO, "zvol:dying", 10*hz);
mutex_exit(&zv->zv_state_lock);
}
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
int count, error, flags;
g_topology_assert();
/*
* To keep it simple we expect either an open or a close, but not both
* at the same time.
*/
KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
(acr <= 0 && acw <= 0 && ace <= 0),
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
if (pp->private == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
}
/*
* We don't pass FEXCL flag to zvol_geom_open()/zvol_geom_close() if
* ace != 0, because GEOM already handles that and handles it a bit
* differently. GEOM allows for multiple read/exclusive consumers and
* ZFS allows only one exclusive consumer, no matter whether it is a
* reader or a writer. I prefer the way GEOM works, so I'll leave it
* to GEOM to decide what to do.
*/
count = acr + acw + ace;
if (count == 0)
return (0);
flags = 0;
if (acr != 0 || ace != 0)
flags |= FREAD;
if (acw != 0)
flags |= FWRITE;
g_topology_unlock();
if (count > 0)
error = zvol_geom_open(pp, flags, count);
else
error = zvol_geom_close(pp, flags, -count);
g_topology_lock();
return (error);
}
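/*
* Illustrative example of the mapping above: a consumer opening
* read-only calls g_access() with acr=1, acw=0, ace=0, which yields
* flags=FREAD and count=1, i.e. zvol_geom_open(pp, FREAD, 1). The same
* consumer going away passes acr=-1, acw=0, ace=0, so count=-1 and
* zvol_geom_close(pp, FREAD, 1) is called instead.
*/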
static void
zvol_geom_worker(void *arg)
{
zvol_state_t *zv = arg;
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct bio *bp;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
thread_lock(curthread);
sched_prio(curthread, PRIBIO);
thread_unlock(curthread);
for (;;) {
mtx_lock(&zsg->zsg_queue_mtx);
bp = bioq_takefirst(&zsg->zsg_queue);
if (bp == NULL) {
if (zsg->zsg_state == ZVOL_GEOM_STOPPED) {
zsg->zsg_state = ZVOL_GEOM_RUNNING;
wakeup(&zsg->zsg_state);
mtx_unlock(&zsg->zsg_queue_mtx);
kthread_exit();
}
msleep(&zsg->zsg_queue, &zsg->zsg_queue_mtx,
PRIBIO | PDROP, "zvol:io", 0);
continue;
}
mtx_unlock(&zsg->zsg_queue_mtx);
zvol_geom_bio_strategy(bp);
}
}
static void
zvol_geom_bio_start(struct bio *bp)
{
zvol_state_t *zv = bp->bio_to->private;
struct zvol_state_geom *zsg;
boolean_t first;
if (zv == NULL) {
g_io_deliver(bp, ENXIO);
return;
}
if (bp->bio_cmd == BIO_GETATTR) {
if (zvol_geom_bio_getattr(bp))
g_io_deliver(bp, EOPNOTSUPP);
return;
}
if (!THREAD_CAN_SLEEP()) {
zsg = &zv->zv_zso->zso_geom;
mtx_lock(&zsg->zsg_queue_mtx);
first = (bioq_first(&zsg->zsg_queue) == NULL);
bioq_insert_tail(&zsg->zsg_queue, bp);
mtx_unlock(&zsg->zsg_queue_mtx);
if (first)
wakeup_one(&zsg->zsg_queue);
return;
}
zvol_geom_bio_strategy(bp);
}
static int
zvol_geom_bio_getattr(struct bio *bp)
{
zvol_state_t *zv;
zv = bp->bio_to->private;
ASSERT3P(zv, !=, NULL);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs;
if (g_handleattr_int(bp, "GEOM::candelete", 1))
return (0);
if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksavail", avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksused", refd / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksavail",
avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksused", refd / DEV_BSIZE))
return (0);
}
return (1);
}
static void
zvol_filter_detach(struct knote *kn)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
zv = kn->kn_hook;
zsd = &zv->zv_zso->zso_dev;
knlist_remove(&zsd->zsd_selinfo.si_note, kn, 0);
}
static int
zvol_filter_vnode(struct knote *kn, long hint)
{
kn->kn_fflags |= kn->kn_sfflags & hint;
return (kn->kn_fflags != 0);
}
static int
zvol_cdev_kqfilter(struct cdev *dev, struct knote *kn)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
zv = dev->si_drv2;
zsd = &zv->zv_zso->zso_dev;
if (kn->kn_filter != EVFILT_VNODE)
return (EINVAL);
/* XXX: extend support for other NOTE_* events */
if (kn->kn_sfflags != NOTE_ATTRIB)
return (EINVAL);
kn->kn_fop = &zvol_filterops_vnode;
kn->kn_hook = zv;
knlist_add(&zsd->zsd_selinfo.si_note, kn, 0);
return (0);
}
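/*
* Userland sketch (illustrative, not part of this file): a consumer can
* watch a zvol character device for attribute changes such as a resize,
* which zvol_os_update_volsize() posts as NOTE_ATTRIB:
*
* int fd = open("/dev/zvol/pool/vol", O_RDONLY);
* int kq = kqueue();
* struct kevent ev;
* EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_ATTRIB, 0, 0);
* (void) kevent(kq, &ev, 1, NULL, 0, NULL); // register
* (void) kevent(kq, NULL, 0, &ev, 1, NULL); // wait for the note
*/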
static void
zvol_geom_bio_strategy(struct bio *bp)
{
zvol_state_t *zv;
uint64_t off, volsize;
size_t resid;
char *addr;
objset_t *os;
zfs_locked_range_t *lr;
int error = 0;
boolean_t doread = B_FALSE;
boolean_t is_dumpified;
boolean_t sync;
if (bp->bio_to)
zv = bp->bio_to->private;
else
zv = bp->bio_dev->si_drv2;
if (zv == NULL) {
error = SET_ERROR(ENXIO);
goto out;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
switch (bp->bio_cmd) {
case BIO_READ:
doread = B_TRUE;
break;
case BIO_WRITE:
case BIO_FLUSH:
case BIO_DELETE:
if (zv->zv_flags & ZVOL_RDONLY) {
error = SET_ERROR(EROFS);
goto resume;
}
zvol_ensure_zilog(zv);
if (bp->bio_cmd == BIO_FLUSH)
goto sync;
break;
default:
error = SET_ERROR(EOPNOTSUPP);
goto resume;
}
off = bp->bio_offset;
volsize = zv->zv_volsize;
os = zv->zv_objset;
ASSERT3P(os, !=, NULL);
addr = bp->bio_data;
resid = bp->bio_length;
if (resid > 0 && off >= volsize) {
error = SET_ERROR(EIO);
goto resume;
}
is_dumpified = B_FALSE;
sync = !doread && !is_dumpified &&
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
/*
* There must be no buffer changes when doing a dmu_sync() because
* we can't change the data whilst calculating the checksum.
*/
lr = zfs_rangelock_enter(&zv->zv_rangelock, off, resid,
doread ? RL_READER : RL_WRITER);
if (bp->bio_cmd == BIO_DELETE) {
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, off, resid, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
off, resid);
resid = 0;
}
goto unlock;
}
while (resid != 0 && off < volsize) {
size_t size = MIN(resid, zvol_maxphys);
if (doread) {
error = dmu_read(os, ZVOL_OBJ, off, size, addr,
DMU_READ_PREFETCH);
} else {
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
zvol_log_write(zv, tx, off, size, sync);
dmu_tx_commit(tx);
}
}
if (error) {
/* Convert checksum errors into IO errors. */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
off += size;
addr += size;
resid -= size;
}
unlock:
zfs_rangelock_exit(lr);
bp->bio_completed = bp->bio_length - resid;
if (bp->bio_completed < bp->bio_length && off > volsize)
error = SET_ERROR(EINVAL);
switch (bp->bio_cmd) {
case BIO_FLUSH:
break;
case BIO_READ:
dataset_kstats_update_read_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_WRITE:
dataset_kstats_update_write_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_DELETE:
break;
default:
break;
}
if (sync) {
sync:
zil_commit(zv->zv_zilog, ZVOL_OBJ);
}
resume:
rw_exit(&zv->zv_suspend_lock);
out:
if (bp->bio_to)
g_io_deliver(bp, error);
else
biofinish(bp, NULL, error);
}
/*
* Character device mode implementation
*/
static int
zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
zfs_uio_t uio;
zfs_uio_init(&uio, uio_s);
zv = dev->si_drv2;
volsize = zv->zv_volsize;
/*
* uio_loffset == volsize isn't an error as
* it's required for EOF processing.
*/
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
ssize_t start_resid = zfs_uio_resid(&uio);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_READER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
/* Don't read past the end. */
if (bytes > volsize - zfs_uio_offset(&uio))
bytes = volsize - zfs_uio_offset(&uio);
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* Convert checksum errors into IO errors. */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
rw_exit(&zv->zv_suspend_lock);
return (error);
}
static int
zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
boolean_t sync;
zfs_uio_t uio;
zv = dev->si_drv2;
volsize = zv->zv_volsize;
zfs_uio_init(&uio, uio_s);
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
ssize_t start_resid = zfs_uio_resid(&uio);
sync = (ioflag & IO_SYNC) ||
(zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_WRITER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
uint64_t off = zfs_uio_offset(&uio);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* Don't write past the end. */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0)
zvol_log_write(zv, tx, off, bytes, sync);
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
return (error);
}
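/*
* Note on the sync policy above: a per-write zil_commit() is triggered
* either by the caller requesting synchronous I/O (an open with O_SYNC
* typically yields IO_SYNC in ioflag) or by the dataset property (e.g.
* "zfs set sync=always pool/vol" makes os_sync == ZFS_SYNC_ALWAYS);
* otherwise the write stays asynchronous until an explicit flush.
*/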
static int
zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
int err = 0;
boolean_t drop_suspend = B_FALSE;
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
/*
* Obtain a copy of si_drv2 under zvol_state_lock to make sure that
* either we observe the zvol free code setting si_drv2 to NULL, or
* zv is protected from being freed by its positive zv_open_count.
*/
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_zv_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
kern_yield(PRI_USER);
goto retry;
} else {
drop_namespace = B_TRUE;
}
}
err = zvol_first_open(zv, !(flags & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
if (flags & O_EXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
zv->zv_open_count++;
if (flags & O_SYNC) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt++;
if (zsd->zsd_sync_cnt == 1 &&
(zv->zv_flags & ZVOL_WRITTEN_TO) != 0)
zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
}
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
static int
zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* Make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock.
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count--;
if (flags & O_SYNC) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt--;
}
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static int
zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int fflag, struct thread *td)
{
zvol_state_t *zv;
zfs_locked_range_t *lr;
off_t offset, length;
int error;
boolean_t sync;
zv = dev->si_drv2;
error = 0;
KASSERT(zv->zv_open_count > 0,
("Device with zero access count in %s", __func__));
switch (cmd) {
case DIOCGSECTORSIZE:
*(uint32_t *)data = DEV_BSIZE;
break;
case DIOCGMEDIASIZE:
*(off_t *)data = zv->zv_volsize;
break;
case DIOCGFLUSH:
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
if (zv->zv_zilog != NULL)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGDELETE:
if (!zvol_unmap_enabled)
break;
offset = ((off_t *)data)[0];
length = ((off_t *)data)[1];
if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
offset < 0 || offset >= zv->zv_volsize ||
length <= 0) {
printf("%s: offset=%jd length=%jd\n", __func__, offset,
length);
error = SET_ERROR(EINVAL);
break;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
RL_WRITER);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
sync = FALSE;
dmu_tx_abort(tx);
} else {
sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
zvol_log_truncate(zv, tx, offset, length, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
offset, length);
}
zfs_rangelock_exit(lr);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGSTRIPESIZE:
*(off_t *)data = zv->zv_volblocksize;
break;
case DIOCGSTRIPEOFFSET:
*(off_t *)data = 0;
break;
case DIOCGATTR: {
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
if (strcmp(arg->name, "GEOM::candelete") == 0)
arg->value.i = 1;
else if (strcmp(arg->name, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = refd / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(
spa_normal_class(spa));
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
break;
}
case FIOSEEKHOLE:
case FIOSEEKDATA: {
off_t *off = (off_t *)data;
uint64_t noff;
boolean_t hole;
hole = (cmd == FIOSEEKHOLE);
noff = *off;
lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
RL_READER);
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
zfs_rangelock_exit(lr);
*off = noff;
break;
}
default:
error = SET_ERROR(ENOIOCTL);
}
return (error);
}
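/*
* Userland sketch (illustrative): these ioctls follow the standard
* FreeBSD disk(4) interface, e.g. querying the volume size:
*
* #include <sys/disk.h>
* off_t mediasize;
* int fd = open("/dev/zvol/pool/vol", O_RDONLY);
* if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) == 0)
* printf("%jd bytes\n", (intmax_t)mediasize);
*/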
/*
* Misc. helpers
*/
static void
zvol_ensure_zilog(zvol_state_t *zv)
{
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
if (!rw_tryupgrade(&zv->zv_suspend_lock)) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
}
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data, &zv->zv_kstat.dk_zil_sums);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_os_create_minor() */
VERIFY0(zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED);
}
rw_downgrade(&zv->zv_suspend_lock);
}
}
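/*
* The upgrade dance above is a general pattern for promoting a read
* lock to a write lock when no blocking upgrade primitive is available
* (sketch below is illustrative; names are not part of this file). If
* rw_tryupgrade() fails, the lock must be dropped and re-taken as
* writer, and the guarded condition must be re-checked because another
* thread may have satisfied it in the window:
*
* rw_enter(&lk, RW_READER);
* if (ptr == NULL) {
* if (!rw_tryupgrade(&lk)) {
* rw_exit(&lk);
* rw_enter(&lk, RW_WRITER); // lock was dropped: recheck
* }
* if (ptr == NULL)
* ptr = create_thing();
* rw_downgrade(&lk);
* }
*/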
boolean_t
zvol_os_is_zvol(const char *device)
{
return (device && strncmp(device, ZVOL_DIR, strlen(ZVOL_DIR)) == 0);
}
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/* Move to a new hashtable entry. */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
struct g_geom *gp;
g_topology_lock();
gp = pp->geom;
ASSERT3P(gp, !=, NULL);
zsg->zsg_provider = NULL;
g_wither_provider(pp, ENXIO);
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = zv->zv_volsize;
pp->private = zv;
zsg->zsg_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
dev = zsd->zsd_cdev;
if (dev != NULL) {
destroy_dev(dev);
dev = zsd->zsd_cdev = NULL;
if (zv->zv_open_count > 0) {
zv->zv_flags &= ~ZVOL_EXCL;
zv->zv_open_count = 0;
/* XXX need suspend lock but lock order */
zvol_last_close(zv);
}
}
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
}
}
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
+ dataset_kstats_rename(&zv->zv_kstat, newname);
}
/*
* Remove minor node for the specified volume.
*/
void
zvol_os_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp __maybe_unused = zsg->zsg_provider;
ASSERT3P(pp->private, ==, NULL);
g_topology_lock();
zvol_geom_destroy(zv);
g_topology_unlock();
mtx_destroy(&zsg->zsg_queue_mtx);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL) {
ASSERT3P(dev->si_drv2, ==, NULL);
destroy_dev(dev);
knlist_clear(&zsd->zsd_selinfo.si_note, 0);
knlist_destroy(&zsd->zsd_selinfo.si_note);
}
}
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
/*
* Create a minor node (plus a whole lot more) for the specified volume.
*/
int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t volmode, hash;
int error;
bool replayed_zil = B_FALSE;
ZFS_LOG(1, "Creating ZVOL %s...", name);
hash = zvol_name_hash(name);
if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(EEXIST));
}
DROP_GIANT();
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
/* Lie and say we're read-only. */
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
if (error || volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
error = 0;
/*
* zvol_alloc equivalent ...
*/
zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
zv->zv_hash = hash;
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_volmode = volmode;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp;
struct g_geom *gp;
zsg->zsg_state = ZVOL_GEOM_UNINIT;
mtx_init(&zsg->zsg_queue_mtx, "zvol", NULL, MTX_DEF);
g_topology_lock();
gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
gp->start = zvol_geom_bio_start;
gp->access = zvol_geom_access;
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = 0;
pp->private = zv;
zsg->zsg_provider = pp;
bioq_init(&zsg->zsg_queue);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
knlist_init_sx(&zsd->zsd_selinfo.si_note,
&zv->zv_state_lock);
}
}
(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
else
replayed_zil = zil_replay(os, zv, zvol_replay_vector);
}
if (replayed_zil)
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
/* TODO: prefetch for geom tasting */
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
zvol_geom_run(zv);
g_topology_unlock();
}
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
zvol_minors++;
rw_exit(&zvol_state_lock);
ZFS_LOG(1, "ZVOL %s created.", name);
}
PICKUP_GIANT();
return (error);
}
void
zvol_os_clear_private(zvol_state_t *zv)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
if (pp->private == NULL) /* already cleared */
return;
mtx_lock(&zsg->zsg_queue_mtx);
zsg->zsg_state = ZVOL_GEOM_STOPPED;
pp->private = NULL;
wakeup_one(&zsg->zsg_queue);
while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
0, "zvol:w", 0);
mtx_unlock(&zsg->zsg_queue_mtx);
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL)
dev->si_drv2 = NULL;
}
}
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
zv->zv_volsize = volsize;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
g_topology_lock();
if (pp->private == NULL) {
g_topology_unlock();
return (SET_ERROR(ENXIO));
}
/*
* Do not invoke a resize event when the initial size was zero.
* ZVOL initializes the size on first open; this is not
* real resizing.
*/
if (pp->mediasize == 0)
pp->mediasize = zv->zv_volsize;
else
g_resize_provider(pp, zv->zv_volsize);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
KNOTE_UNLOCKED(&zsd->zsd_selinfo.si_note, NOTE_ATTRIB);
}
return (0);
}
void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
// XXX? set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
// XXX? set_capacity(zv->zv_zso->zvo_disk, capacity);
}
/*
* Public interfaces
*/
int
zvol_busy(void)
{
return (zvol_minors != 0);
}
int
zvol_init(void)
{
zvol_init_impl();
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
index e87954714e3a..5898789ad53d 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
@@ -1,509 +1,509 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
- * Solaris Porting Layer (SPL) Credential Implementation.
+ * Solaris Porting Layer (SPL) Condition Variables Implementation.
*/
#include <sys/condvar.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <linux/hrtimer.h>
#include <linux/compiler_compat.h>
#include <linux/mod_compat.h>
#include <linux/sched.h>
#ifdef HAVE_SCHED_SIGNAL_HEADER
#include <linux/sched/signal.h>
#endif
#define MAX_HRTIMEOUT_SLACK_US 1000
static unsigned int spl_schedule_hrtimeout_slack_us = 0;
static int
param_set_hrtimeout_slack(const char *buf, zfs_kernel_param_t *kp)
{
unsigned long val;
int error;
error = kstrtoul(buf, 0, &val);
if (error)
return (error);
if (val > MAX_HRTIMEOUT_SLACK_US)
return (-EINVAL);
error = param_set_uint(buf, kp);
if (error < 0)
return (error);
return (0);
}
module_param_call(spl_schedule_hrtimeout_slack_us, param_set_hrtimeout_slack,
param_get_uint, &spl_schedule_hrtimeout_slack_us, 0644);
MODULE_PARM_DESC(spl_schedule_hrtimeout_slack_us,
"schedule_hrtimeout_range() delta/slack value in us, default(0)");
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
ASSERT(cvp);
ASSERT(name == NULL);
ASSERT(type == CV_DEFAULT);
ASSERT(arg == NULL);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
init_waitqueue_head(&cvp->cv_destroy);
atomic_set(&cvp->cv_waiters, 0);
atomic_set(&cvp->cv_refs, 1);
cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
ASSERT(cvp->cv_mutex == NULL);
ASSERT(!waitqueue_active(&cvp->cv_event));
return (1);
}
return (0);
}
void
__cv_destroy(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
cvp->cv_magic = CV_DESTROY;
atomic_dec(&cvp->cv_refs);
/* Block until all waiters are woken and references dropped. */
while (cv_destroy_wakeup(cvp) == 0)
wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
ASSERT3P(cvp->cv_mutex, ==, NULL);
ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
DEFINE_WAIT(wait);
kmutex_t *m;
ASSERT(cvp);
ASSERT(mp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ASSERT(mutex_owned(mp));
atomic_inc(&cvp->cv_refs);
m = READ_ONCE(cvp->cv_mutex);
if (!m)
m = xchg(&cvp->cv_mutex, mp);
/* Ensure the same mutex is used by all callers */
ASSERT(m == NULL || m == mp);
prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
atomic_inc(&cvp->cv_waiters);
/*
* The mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty.
*/
mutex_exit(mp);
if (io)
io_schedule();
else
schedule();
/* No more waiters, so a different mutex could be used. */
if (atomic_dec_and_test(&cvp->cv_waiters)) {
/*
* This is set without any lock, so it's racy. But this is
* just for debug anyway, so make it best-effort
*/
cvp->cv_mutex = NULL;
wake_up(&cvp->cv_destroy);
}
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
/*
* Hold the mutex after we release the cvp, otherwise we could
* deadlock with a thread holding the mutex and calling cv_destroy().
*/
mutex_enter(mp);
}
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);
void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
int
__cv_wait_io_sig(kcondvar_t *cvp, kmutex_t *mp)
{
cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 1);
return (signal_pending(current) ? 0 : 1);
}
EXPORT_SYMBOL(__cv_wait_io_sig);
int
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
return (signal_pending(current) ? 0 : 1);
}
EXPORT_SYMBOL(__cv_wait_sig);
void
__cv_wait_idle(kcondvar_t *cvp, kmutex_t *mp)
{
sigset_t blocked, saved;
sigfillset(&blocked);
(void) sigprocmask(SIG_BLOCK, &blocked, &saved);
cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
(void) sigprocmask(SIG_SETMASK, &saved, NULL);
}
EXPORT_SYMBOL(__cv_wait_idle);
#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define spl_io_schedule_timeout(t) io_schedule_timeout(t)
#else
struct spl_task_timer {
struct timer_list timer;
struct task_struct *task;
};
static void
__cv_wakeup(spl_timer_list_t t)
{
struct timer_list *tmr = (struct timer_list *)t;
struct spl_task_timer *task_timer = from_timer(task_timer, tmr, timer);
wake_up_process(task_timer->task);
}
static long
spl_io_schedule_timeout(long time_left)
{
long expire_time = jiffies + time_left;
struct spl_task_timer task_timer;
struct timer_list *timer = &task_timer.timer;
task_timer.task = current;
timer_setup(timer, __cv_wakeup, 0);
timer->expires = expire_time;
add_timer(timer);
io_schedule();
del_timer_sync(timer);
time_left = expire_time - jiffies;
return (time_left < 0 ? 0 : time_left);
}
#endif
/*
* 'expire_time' argument is an absolute wall clock time in jiffies.
* Return value is time left (expire_time - now) or -1 if timeout occurred.
*/
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
int state, int io)
{
DEFINE_WAIT(wait);
kmutex_t *m;
clock_t time_left;
ASSERT(cvp);
ASSERT(mp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ASSERT(mutex_owned(mp));
/* XXX - Does not handle jiffie wrap properly */
time_left = expire_time - jiffies;
if (time_left <= 0)
return (-1);
atomic_inc(&cvp->cv_refs);
m = READ_ONCE(cvp->cv_mutex);
if (!m)
m = xchg(&cvp->cv_mutex, mp);
/* Ensure the same mutex is used by all callers */
ASSERT(m == NULL || m == mp);
prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
atomic_inc(&cvp->cv_waiters);
/*
* The mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty.
*/
mutex_exit(mp);
if (io)
time_left = spl_io_schedule_timeout(time_left);
else
time_left = schedule_timeout(time_left);
/* No more waiters, so a different mutex could be used. */
if (atomic_dec_and_test(&cvp->cv_waiters)) {
/*
* This is set without any lock, so it's racy. But this is
* just for debug anyway, so make it best-effort
*/
cvp->cv_mutex = NULL;
wake_up(&cvp->cv_destroy);
}
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
/*
* Hold the mutex after we release the cvp, otherwise we could
* deadlock with a thread holding the mutex and calling cv_destroy().
*/
mutex_enter(mp);
return (time_left > 0 ? 1 : -1);
}
int
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
return (__cv_timedwait_common(cvp, mp, exp_time,
TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);
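/*
* Caller sketch (illustrative): since the deadline is absolute, callers
* typically add a relative timeout to the current lbolt value, e.g.
* waiting up to one second for a condition:
*
* mutex_enter(&mp);
* while (!condition) {
* if (cv_timedwait(&cv, &mp, ddi_get_lbolt() + hz) == -1)
* break; // timed out
* }
* mutex_exit(&mp);
*/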
int
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
return (__cv_timedwait_common(cvp, mp, exp_time,
TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);
int
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
int rc;
rc = __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE, 0);
return (signal_pending(current) ? 0 : rc);
}
EXPORT_SYMBOL(__cv_timedwait_sig);
int
__cv_timedwait_idle(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
sigset_t blocked, saved;
int rc;
sigfillset(&blocked);
(void) sigprocmask(SIG_BLOCK, &blocked, &saved);
rc = __cv_timedwait_common(cvp, mp, exp_time,
TASK_INTERRUPTIBLE, 0);
(void) sigprocmask(SIG_SETMASK, &saved, NULL);
return (rc);
}
EXPORT_SYMBOL(__cv_timedwait_idle);
/*
* 'expire_time' argument is an absolute clock time in nanoseconds.
* Return value is time left (expire_time - now) or -1 if timeout occurred.
*/
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
hrtime_t res, int state)
{
DEFINE_WAIT(wait);
kmutex_t *m;
hrtime_t time_left;
ktime_t ktime_left;
u64 slack = 0;
int rc;
ASSERT(cvp);
ASSERT(mp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ASSERT(mutex_owned(mp));
time_left = expire_time - gethrtime();
if (time_left <= 0)
return (-1);
atomic_inc(&cvp->cv_refs);
m = READ_ONCE(cvp->cv_mutex);
if (!m)
m = xchg(&cvp->cv_mutex, mp);
/* Ensure the same mutex is used by all callers */
ASSERT(m == NULL || m == mp);
prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
atomic_inc(&cvp->cv_waiters);
/*
* The mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty.
*/
mutex_exit(mp);
ktime_left = ktime_set(0, time_left);
slack = MIN(MAX(res, spl_schedule_hrtimeout_slack_us * NSEC_PER_USEC),
MAX_HRTIMEOUT_SLACK_US * NSEC_PER_USEC);
rc = schedule_hrtimeout_range(&ktime_left, slack, HRTIMER_MODE_REL);
/* No more waiters, so a different mutex could be used. */
if (atomic_dec_and_test(&cvp->cv_waiters)) {
/*
* This is set without any lock, so it's racy. But this is
* just for debug anyway, so make it best-effort
*/
cvp->cv_mutex = NULL;
wake_up(&cvp->cv_destroy);
}
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
mutex_enter(mp);
return (rc == -EINTR ? 1 : -1);
}
/*
* Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
*/
static int
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag, int state)
{
if (!(flag & CALLOUT_FLAG_ABSOLUTE))
tim += gethrtime();
return (__cv_timedwait_hires(cvp, mp, tim, res, state));
}
int
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
int flag)
{
return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);
int
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag)
{
int rc;
rc = cv_timedwait_hires_common(cvp, mp, tim, res, flag,
TASK_INTERRUPTIBLE);
return (signal_pending(current) ? 0 : rc);
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
int
cv_timedwait_idle_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag)
{
sigset_t blocked, saved;
int rc;
sigfillset(&blocked);
(void) sigprocmask(SIG_BLOCK, &blocked, &saved);
rc = cv_timedwait_hires_common(cvp, mp, tim, res, flag,
TASK_INTERRUPTIBLE);
(void) sigprocmask(SIG_SETMASK, &saved, NULL);
return (rc);
}
EXPORT_SYMBOL(cv_timedwait_idle_hires);
void
__cv_signal(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
/*
* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
* waiter will be set runnable with each call to wake_up().
* Additionally wake_up() holds a spin_lock associated with
* the wait queue to ensure we don't race waking up processes.
*/
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
/*
* wake_up_all() will wake up all waiters, even those which
* have the WQ_FLAG_EXCLUSIVE flag set.
*/
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up_all(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
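/*
* Choosing between the two (sketch; names illustrative): cv_signal()
* suits single-item handoffs because only one exclusive waiter is made
* runnable, while cv_broadcast() is needed whenever every waiter must
* re-evaluate shared state:
*
* mutex_enter(&mp);
* queue_add(&q, item);
* cv_signal(&cv); // exactly one worker should wake
* mutex_exit(&mp);
*
* mutex_enter(&mp);
* shutting_down = B_TRUE;
* cv_broadcast(&cv); // every waiter must see the flag
* mutex_exit(&mp);
*/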
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index 3c30dfc577b4..42821ad60256 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -1,1474 +1,1465 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
+#include <sys/string.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
/*
* Within the scope of spl-kmem.c file the kmem_cache_* definitions
* are removed to allow access to the real Linux slab allocator.
*/
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is
* only used inside our SLAB allocator, so we implement an internal wrapper
* here to give us smp_mb__{before,after}_atomic() on older kernels.
*/
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/* BEGIN CSTYLED */
/*
* Cache magazines are an optimization designed to minimize the cost of
* allocating memory. They do this by keeping a per-cpu cache of recently
* freed objects, which can then be reallocated without taking a lock. This
* can improve performance on highly contended caches. However, because
* objects in magazines will prevent otherwise empty slabs from being
* immediately released, this may not be ideal for low-memory machines.
*
* For this reason spl_kmem_cache_magazine_size can be used to set a maximum
* magazine size. When this value is set to 0 the magazine size will be
* automatically determined based on the object size. Otherwise magazines
* will be limited to 2-256 objects per magazine (i.e. per CPU). Magazines
* may never be entirely disabled in this implementation.
*/
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
"Default magazine size (2-256), set automatically (0)");
-/*
- * The default behavior is to report the number of objects remaining in the
- * cache. This allows the Linux VM to repeatedly reclaim objects from the
- * cache when memory is low satisfy other memory allocations. Alternately,
- * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
- * is reclaimed. This may increase the likelihood of out of memory events.
- */
-static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
-module_param(spl_kmem_cache_reclaim, uint, 0644);
-MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
-
static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
* For small objects the Linux slab allocator should be used to make the most
* efficient use of the memory. However, large objects are not supported by
* the Linux slab and therefore the SPL implementation is preferred. A cutoff
* of 16K was determined to be optimal for architectures using 4K pages and
* to also work well on architectures using larger 64K page sizes.
*/
-static unsigned int spl_kmem_cache_slab_limit = 16384;
+static unsigned int spl_kmem_cache_slab_limit =
+ SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
"Number of spl_kmem_cache threads");
/* END CSTYLED */
/*
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
* implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* kernel have removed support for destructors. This is a deal
* breaker for the SPL which contains particularly expensive
* initializers for mutexes, condition variables, etc. We also
* require a minimal level of cleanup for these data types, unlike
* many Linux data types which do not need to be explicitly destroyed.
*
* 2) Virtual address space backed slab. Callers of the Solaris slab
* expect it to work well for both small and very large allocations.
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
* virtual address space removes the need for contiguous pages
* and greatly improves performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
* the needed features. It is not as highly optimized as either the
* Solaris or Linux slabs, but it should get me most of what is
* needed until it can be optimized or obsoleted by another approach.
*
* One serious concern I do have about this method is the relatively
* small virtual address space on 32-bit arches. This will seriously
* constrain the size of the slab caches and their performance.
*/
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return (ptr);
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
/*
* The Linux direct reclaim path uses this out of band value to
* determine if forward progress is being made. Normally this is
* incremented by kmem_freepages() which is part of the various
* Linux slab implementations. However, since we are using none
* of that infrastructure we are responsible for incrementing it.
*/
if (current->reclaim_state)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
vfree(ptr);
}
/*
* Required space for each aligned sks.
*/
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each aligned object.
*/
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
uint32_t align = skc->skc_obj_align;
return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);
uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);
/*
* Lookup the spl_kmem_object_t for an object given that object.
*/
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
skc->skc_obj_align, uint32_t));
}
/*
* It's important that we pack the spl_kmem_obj_t structure and the
* actual objects into one large address space to minimize the number
* of calls to the allocator. It is far better to do a few large
* allocations and then subdivide them ourselves. Now which allocator
* we use requires balancing a few trade-offs.
*
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
* it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
* which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
* global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
*
* +------------------------+
* | spl_kmem_slab_t --+-+ |
* | skc_obj_size <-+ | |
* | spl_kmem_obj_t | |
* | skc_obj_size <---+ |
* | spl_kmem_obj_t | |
* | ... v |
* +------------------------+
*/
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
void *base;
uint32_t obj_size;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_objs = skc->skc_slab_objs;
sks->sks_age = jiffies;
sks->sks_cache = skc;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
for (int i = 0; i < sks->sks_objs; i++) {
void *obj = base + spl_sks_size(skc) + (i * obj_size);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
INIT_LIST_HEAD(&sko->sko_list);
list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
return (sks);
}
/*
* Remove a slab from the complete or partial list; it must be called with
* 'skc->skc_lock' held, but the actual free must be performed
* outside the lock to prevent deadlocking on vmem addresses.
*/
static void
spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
/*
* Update slab/objects counters in the cache, then remove the
* slab from the skc->skc_partial_list. Finally add the slab
* and all its objects in to the private work lists where the
* destructors will be called and the memory freed to the system.
*/
skc->skc_obj_total -= sks->sks_objs;
skc->skc_slab_total--;
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
}
/*
* Reclaim empty slabs at the end of the partial list.
*/
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
spl_kmem_slab_t *sks = NULL, *m = NULL;
spl_kmem_obj_t *sko = NULL, *n = NULL;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
/*
* Empty slabs and objects must be moved to a private list so they
* can be safely freed outside the spin lock. All empty slabs are
* at the end of skc->skc_partial_list, therefore once a non-empty
* slab is found we can stop scanning.
*/
spin_lock(&skc->skc_lock);
list_for_each_entry_safe_reverse(sks, m,
&skc->skc_partial_list, sks_list) {
if (sks->sks_ref > 0)
break;
spl_slab_free(sks, &sks_list, &sko_list);
}
spin_unlock(&skc->skc_lock);
/*
* The following two loops ensure all the object destructors are run,
* and the slabs themselves are freed. This is all done outside the
* skc->skc_lock since this allows the destructor to sleep, and
* allows us to perform a conditional reschedule when freeing a
* large number of objects and slabs back to the system.
*/
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
}
list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
struct rb_node *node = root->rb_node;
spl_kmem_emergency_t *ske;
unsigned long address = (unsigned long)obj;
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
if (address < ske->ske_obj)
node = node->rb_left;
else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
}
return (NULL);
}
static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
}
rb_link_node(&ske->ske_node, parent, new);
rb_insert_color(&ske->ske_node, root);
return (1);
}
/*
* Allocate a single emergency object and track it in a red-black tree.
*/
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
return (-EEXIST);
ske = kmalloc(sizeof (*ske), lflags);
if (ske == NULL)
return (-ENOMEM);
ske->ske_obj = __get_free_pages(lflags, order);
if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
if (likely(empty)) {
skc->skc_obj_total++;
skc->skc_obj_emergency++;
if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
skc->skc_obj_emergency_max = skc->skc_obj_emergency;
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
*obj = (void *)ske->ske_obj;
return (0);
}
/*
* Locate the passed object in the red-black tree and free it.
*/
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske) {
rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
skc->skc_obj_emergency--;
skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
if (ske == NULL)
return (-ENOENT);
free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
}
/*
* Release objects from the per-cpu magazine back to their slab. The flush
* argument contains the max number of entries to remove from the magazine.
*/
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
spin_lock(&skc->skc_lock);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
int count = MIN(flush, skm->skm_avail);
for (int i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof (void *) * skm->skm_avail);
spin_unlock(&skc->skc_lock);
}
/*
* Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page.
*/
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
max_size = (spl_kmem_cache_max_size * 1024 * 1024);
tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
if (tgt_size <= max_size) {
tgt_objs = (tgt_size - sks_size) / obj_size;
} else {
tgt_objs = (max_size - sks_size) / obj_size;
tgt_size = (tgt_objs * obj_size) + sks_size;
}
if (tgt_objs == 0)
return (-ENOSPC);
*objs = tgt_objs;
*size = tgt_size;
return (0);
}
/*
* Make a guess at reasonable per-cpu magazine size based on the size of
* each object and the cost of caching N of them in each magazine. Long
* term this should really adapt based on an observed usage heuristic.
*/
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
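	/* Honor the module tunable when set, clamped to the range [2, 256] */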
if (spl_kmem_cache_magazine_size > 0)
return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
	/* Per-magazine sizes below assume a 4 KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256 KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128 KiB per-magazine */
else
size = 256;
return (size);
}
/*
* Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof (spl_kmem_magazine_t) +
sizeof (void *) * skc->skc_mag_size;
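	/* Allocate the magazine on the target CPU's NUMA node for locality */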
skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
skm->skm_cpu = cpu;
}
return (skm);
}
/*
* Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kfree(skm);
}
/*
 * Create all per-cpu magazines of reasonable sizes.
*/
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
skc->skc_mag_size = spl_magazine_size(skc);
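	/* Refill in batches of half the magazine size, rounded up */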
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_possible_cpu(i) {
skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
kfree(skc->skc_mag);
return (-ENOMEM);
}
}
return (0);
}
/*
 * Destroy all per-cpu magazines.
*/
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
kfree(skc->skc_mag);
}
/*
 * Create an object cache based on the following arguments:
* name cache name
* size cache object size
* align cache object alignment
* ctor cache object constructor
* dtor cache object destructor
* reclaim cache object reclaim
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
* KMC_KVMEM Force kvmem backed SPL cache
* KMC_SLAB Force Linux slab backed cache
* KMC_NODEBUG Disable debugging (unsupported)
*/
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
void *priv, void *vmp, int flags)
{
gfp_t lflags = kmem_flags_convert(KM_SLEEP);
spl_kmem_cache_t *skc;
int rc;
/*
* Unsupported flags
*/
ASSERT(vmp == NULL);
ASSERT(reclaim == NULL);
might_sleep();
skc = kzalloc(sizeof (*skc), lflags);
if (skc == NULL)
return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = kmalloc(skc->skc_name_size, lflags);
if (skc->skc_name == NULL) {
kfree(skc);
return (NULL);
}
strlcpy(skc->skc_name, name, skc->skc_name_size);
skc->skc_ctor = ctor;
skc->skc_dtor = dtor;
skc->skc_private = priv;
skc->skc_vmp = vmp;
skc->skc_linux_cache = NULL;
skc->skc_flags = flags;
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_slab_total = 0;
skc->skc_slab_alloc = 0;
skc->skc_slab_max = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
skc->skc_obj_deadlock = 0;
skc->skc_obj_emergency = 0;
skc->skc_obj_emergency_max = 0;
rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
GFP_KERNEL);
if (rc != 0) {
kfree(skc);
return (NULL);
}
/*
* Verify the requested alignment restriction is sane.
*/
if (align) {
VERIFY(ISP2(align));
VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
VERIFY3U(align, <=, PAGE_SIZE);
skc->skc_obj_align = align;
}
/*
 * When no specific type of slab is requested (kmem, vmem, or
 * linuxslab), select a cache type based on the object size
* and default tunables.
*/
if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
* Objects smaller than spl_kmem_cache_slab_limit can
* use the Linux slab for better space-efficiency.
*/
skc->skc_flags |= KMC_SLAB;
} else {
/*
* All other objects are considered large and are
* placed on kvmem backed slabs.
*/
skc->skc_flags |= KMC_KVMEM;
}
}
/*
* Given the type of slab allocate the required resources.
*/
if (skc->skc_flags & KMC_KVMEM) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
goto out;
rc = spl_magazine_create(skc);
if (rc)
goto out;
} else {
unsigned long slabflags = 0;
- if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE))
+ if (size > spl_kmem_cache_slab_limit)
goto out;
#if defined(SLAB_USERCOPY)
/*
* Required for PAX-enabled kernels if the slab is to be
* used for copying between user and kernel space.
*/
slabflags |= SLAB_USERCOPY;
#endif
#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
/*
* Newer grsec patchset uses kmem_cache_create_usercopy()
* instead of SLAB_USERCOPY flag
*/
skc->skc_linux_cache = kmem_cache_create_usercopy(
skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
skc->skc_linux_cache = kmem_cache_create(
skc->skc_name, size, align, slabflags, NULL);
#endif
if (skc->skc_linux_cache == NULL)
goto out;
}
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
return (skc);
out:
kfree(skc->skc_name);
percpu_counter_destroy(&skc->skc_linux_alloc);
kfree(skc);
return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
* Register a move callback for cache defragmentation.
* XXX: Unimplemented but harmless to stub out for now.
*/
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
* Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
	/* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
spin_lock(&skc->skc_lock);
id = skc->skc_taskqid;
spin_unlock(&skc->skc_lock);
taskq_cancel_id(spl_kmem_cache_taskq, id);
/*
	 * Wait until all current callers complete; this is mainly
* to catch the case where a low memory situation triggers a
* cache reaping action which races with this destroy.
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
if (skc->skc_flags & KMC_KVMEM) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
ASSERT(skc->skc_flags & KMC_SLAB);
kmem_cache_destroy(skc->skc_linux_cache);
}
spin_lock(&skc->skc_lock);
/*
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
ASSERT3U(skc->skc_slab_alloc, ==, 0);
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
percpu_counter_destroy(&skc->skc_linux_alloc);
spin_unlock(&skc->skc_lock);
kfree(skc->skc_name);
kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
* Allocate an object from a slab attached to the cache. This is used to
* repopulate the per-cpu magazine caches in batches when they run low.
*/
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
spl_kmem_obj_t *sko;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(sks->sks_magic == SKS_MAGIC);
sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
ASSERT(sko->sko_magic == SKO_MAGIC);
ASSERT(sko->sko_addr != NULL);
/* Remove from sks_free_list */
list_del_init(&sko->sko_list);
sks->sks_age = jiffies;
sks->sks_ref++;
skc->skc_obj_alloc++;
/* Track max obj usage statistics */
if (skc->skc_obj_alloc > skc->skc_obj_max)
skc->skc_obj_max = skc->skc_obj_alloc;
/* Track max slab usage statistics */
if (sks->sks_ref == 1) {
skc->skc_slab_alloc++;
if (skc->skc_slab_alloc > skc->skc_slab_max)
skc->skc_slab_max = skc->skc_slab_alloc;
}
return (sko->sko_addr);
}
/*
 * Generic slab allocation function to be run by the global work queues.
* It is responsible for allocating a new slab, linking it in to the list
* of partial slabs, and then waking any waiters.
*/
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
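	/*
	 * Mark this task as being in a filesystem transaction so the
	 * allocation below cannot recurse into the filesystem and deadlock.
	 */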
fstrans_cookie_t cookie = spl_fstrans_mark();
sks = spl_slab_alloc(skc, flags);
spl_fstrans_unmark(cookie);
spin_lock(&skc->skc_lock);
if (sks) {
skc->skc_slab_total++;
skc->skc_obj_total += sks->sks_objs;
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
smp_mb__before_atomic();
clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
smp_mb__after_atomic();
}
spin_unlock(&skc->skc_lock);
return (sks == NULL ? -ENOMEM : 0);
}
static void
spl_cache_grow_work(void *data)
{
spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
spl_kmem_cache_t *skc = ska->ska_cache;
int error = __spl_cache_grow(skc, ska->ska_flags);
atomic_dec(&skc->skc_ref);
smp_mb__before_atomic();
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
if (error == 0)
wake_up_all(&skc->skc_waitq);
kfree(ska);
}
/*
* Returns non-zero when a new slab should be available.
*/
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}
/*
* No available objects on any slabs, create a new slab. Note that this
* functionality is disabled for KMC_SLAB caches which are backed by the
* Linux slab.
*/
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc = 0;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
*obj = NULL;
/*
	 * Since we can't sleep, attempt an emergency allocation to satisfy
	 * the request. The only alternative is to fail the allocation, but
	 * it's preferable to try. The use of KM_NOSLEEP is expected to be rare.
*/
if (flags & KM_NOSLEEP)
return (spl_emergency_alloc(skc, flags, obj));
might_sleep();
/*
* Before allocating a new slab wait for any reaping to complete and
* then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
return (rc ? rc : -EAGAIN);
}
/*
	 * Note: It would be nice to reduce context switch overhead and
	 * improve NUMA locality by trying to allocate a new slab in the
	 * current process context with the KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug where
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
*/
/*
* This is handled by dispatching a work request to the global work
* queue. This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
* allocations to ensure forward progress is always maintained.
*/
if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
spl_kmem_alloc_t *ska;
ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
if (ska == NULL) {
clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_all(&skc->skc_waitq);
return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
ska->ska_cache = skc;
ska->ska_flags = flags;
taskq_init_ent(&ska->ska_tqe);
taskq_dispatch_ent(spl_kmem_cache_taskq,
spl_cache_grow_work, ska, 0, &ska->ska_tqe);
}
/*
* The goal here is to only detect the rare case where a virtual slab
* allocation has deadlocked. We must be careful to minimize the use
* of emergency objects which are more expensive to track. Therefore,
* we set a very long timeout for the asynchronous allocation and if
* the timeout is reached the cache is flagged as deadlocked. From
* this point only new emergency objects will be allocated until the
* asynchronous allocation completes and clears the deadlocked flag.
*/
if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
rc = spl_emergency_alloc(skc, flags, obj);
} else {
remaining = wait_event_timeout(skc->skc_waitq,
spl_cache_grow_wait(skc), HZ / 10);
if (!remaining) {
spin_lock(&skc->skc_lock);
if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
skc->skc_obj_deadlock++;
}
spin_unlock(&skc->skc_lock);
}
rc = -ENOMEM;
}
return (rc);
}
/*
* Refill a per-cpu magazine with objects from the slabs for this cache.
* Ideally the magazine can be repopulated using existing objects which have
* been released, however if we are unable to locate enough free objects new
* slabs of objects will be created. On success NULL is returned, otherwise
* the address of a single emergency object is returned for use by the caller.
*/
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
while (refill > 0) {
/* No slabs available we may need to grow the cache */
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
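			/* Re-enable interrupts since growing the cache may sleep */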
local_irq_enable();
rc = spl_cache_grow(skc, flags, &obj);
local_irq_disable();
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
return (obj);
if (rc)
goto out;
			/* Rescheduled to a different CPU, skm is no longer local */
if (skm != skc->skc_mag[smp_processor_id()])
goto out;
/*
* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill.
*/
refill = MIN(refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
continue;
}
/* Grab the next available slab */
sks = list_entry((&skc->skc_partial_list)->next,
spl_kmem_slab_t, sks_list);
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref < sks->sks_objs);
ASSERT(!list_empty(&sks->sks_free_list));
/*
* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it.
*/
while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
++count) {
ASSERT(skm->skm_avail < skm->skm_size);
ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++] =
spl_cache_obj(skc, sks);
}
/* Move slab to skc_complete_list when full */
if (sks->sks_ref == sks->sks_objs) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_complete_list);
}
}
spin_unlock(&skc->skc_lock);
out:
return (NULL);
}
/*
* Release an object back to the slab from which it came.
*/
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
list_add(&sko->sko_list, &sks->sks_free_list);
sks->sks_age = jiffies;
sks->sks_ref--;
skc->skc_obj_alloc--;
/*
* Move slab to skc_partial_list when no longer full. Slabs
	 * are added to the head to keep the partial list in quasi-full
* sorted order. Fuller at the head, emptier at the tail.
*/
if (sks->sks_ref == (sks->sks_objs - 1)) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_partial_list);
}
/*
* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation.
*/
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
}
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
*/
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Allocate directly from a Linux slab. All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
* callers will never fail.
*/
if (skc->skc_flags & KMC_SLAB) {
struct kmem_cache *slc = skc->skc_linux_cache;
do {
obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
} while ((obj == NULL) && !(flags & KM_NOSLEEP));
if (obj != NULL) {
/*
* Even though we leave everything up to the
* underlying cache we still keep track of
* how many objects we've allocated in it for
* better debuggability.
*/
percpu_counter_inc(&skc->skc_linux_alloc);
}
goto ret;
}
local_irq_disable();
restart:
/*
* Safe to update per-cpu structure without lock, but
* in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
obj = skm->skm_objs[--skm->skm_avail];
} else {
obj = spl_cache_refill(skc, skm, flags);
if ((obj == NULL) && !(flags & KM_NOSLEEP))
goto restart;
local_irq_enable();
goto ret;
}
local_irq_enable();
ASSERT(obj);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
ret:
/* Pre-emptively migrate object to CPU L1 cache */
if (obj) {
		if (skc->skc_ctor)
skc->skc_ctor(obj, skc->skc_private, flags);
else
prefetchw(obj);
}
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from. We may need to flush entries from the magazine
 * back to the slabs to make space.
*/
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
int do_reclaim = 0;
int do_emergency = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Run the destructor
*/
if (skc->skc_dtor)
skc->skc_dtor(obj, skc->skc_private);
/*
	 * Free the object back to the underlying Linux slab.
*/
if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
percpu_counter_dec(&skc->skc_linux_alloc);
return;
}
/*
	 * While a cache has outstanding emergency objects, all freed objects
	 * must be checked. However, since emergency objects are never
	 * vmalloc-backed, any object with a vmalloc address can be safely
	 * excluded from this check as an optimization.
*/
if (!is_vmalloc_addr(obj)) {
spin_lock(&skc->skc_lock);
do_emergency = (skc->skc_obj_emergency > 0);
spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
return;
}
local_irq_save(flags);
/*
	 * Safe to update the per-cpu structure without a lock, but since
	 * no remote memory allocation tracking is performed it is
	 * entirely possible to allocate an object from one CPU cache and
	 * return it to another.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
/*
* Per-CPU cache full, flush it to make space for this object,
* this may result in an empty slab which can be reclaimed once
* interrupts are re-enabled.
*/
if (unlikely(skm->skm_avail >= skm->skm_size)) {
spl_cache_flush(skc, skm, skm->skm_refill);
do_reclaim = 1;
}
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
* Depending on how many and which objects are released it may simply
* repopulate the local magazine which will then need to age-out. Objects
* which cannot fit in the magazine will be released back to their slabs
* which will also need to age out before being released. This is all just
* best effort and we do not want to thrash creating and destroying slabs.
*/
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
if (skc->skc_flags & KMC_SLAB)
return;
atomic_inc(&skc->skc_ref);
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
goto out;
/* Reclaim from the magazine and free all now empty slabs. */
unsigned long irq_flags;
local_irq_save(irq_flags);
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
spl_cache_flush(skc, skm, skm->skm_avail);
local_irq_restore(irq_flags);
spl_slab_reclaim(skc);
clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
* This is stubbed out for code consistency with other platforms. There
* is existing logic to prevent concurrent reaping so while this is ugly
* it should do no harm.
*/
int
spl_kmem_cache_reap_active(void)
{
return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);
/*
* Reap all free slabs from all registered caches.
*/
void
spl_kmem_reap(void)
{
spl_kmem_cache_t *skc = NULL;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
spl_kmem_cache_reap_now(skc);
}
up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
spl_kmem_cache_kmem_threads, maxclsyspri,
spl_kmem_cache_kmem_threads * 8, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (spl_kmem_cache_taskq == NULL)
return (-ENOMEM);
return (0);
}
void
spl_kmem_cache_fini(void)
{
taskq_destroy(spl_kmem_cache_taskq);
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
index 4308581147a9..ad553a73a69e 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
@@ -1,715 +1,716 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Kstat Implementation.
*
* Links to Illumos.org for more information on kstat function:
* [1] https://illumos.org/man/1M/kstat
* [2] https://illumos.org/man/9f/kstat_create
*/
#include <linux/seq_file.h>
#include <sys/kstat.h>
#include <sys/vmem.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
+#include <sys/string.h>
static kmutex_t kstat_module_lock;
static struct list_head kstat_module_list;
static kid_t kstat_id;
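/*
 * Double the raw output buffer, up to KSTAT_RAW_MAX, discarding the old
 * contents. Callers are expected to regenerate their output into the
 * larger buffer. Returns ENOMEM once the ceiling has been reached.
 */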
static int
kstat_resize_raw(kstat_t *ksp)
{
if (ksp->ks_raw_bufsize == KSTAT_RAW_MAX)
return (ENOMEM);
vmem_free(ksp->ks_raw_buf, ksp->ks_raw_bufsize);
ksp->ks_raw_bufsize = MIN(ksp->ks_raw_bufsize * 2, KSTAT_RAW_MAX);
ksp->ks_raw_buf = vmem_alloc(ksp->ks_raw_bufsize, KM_SLEEP);
return (0);
}
static int
kstat_seq_show_headers(struct seq_file *f)
{
kstat_t *ksp = (kstat_t *)f->private;
int rc = 0;
ASSERT(ksp->ks_magic == KS_MAGIC);
seq_printf(f, "%d %d 0x%02x %d %d %lld %lld\n",
ksp->ks_kid, ksp->ks_type, ksp->ks_flags,
ksp->ks_ndata, (int)ksp->ks_data_size,
ksp->ks_crtime, ksp->ks_snaptime);
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
restart:
if (ksp->ks_raw_ops.headers) {
rc = ksp->ks_raw_ops.headers(
ksp->ks_raw_buf, ksp->ks_raw_bufsize);
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart;
if (!rc)
seq_puts(f, ksp->ks_raw_buf);
} else {
seq_printf(f, "raw data\n");
}
break;
case KSTAT_TYPE_NAMED:
seq_printf(f, "%-31s %-4s %s\n",
"name", "type", "data");
break;
case KSTAT_TYPE_INTR:
seq_printf(f, "%-8s %-8s %-8s %-8s %-8s\n",
"hard", "soft", "watchdog",
"spurious", "multsvc");
break;
case KSTAT_TYPE_IO:
seq_printf(f,
"%-8s %-8s %-8s %-8s %-8s %-8s "
"%-8s %-8s %-8s %-8s %-8s %-8s\n",
"nread", "nwritten", "reads", "writes",
"wtime", "wlentime", "wupdate",
"rtime", "rlentime", "rupdate",
"wcnt", "rcnt");
break;
case KSTAT_TYPE_TIMER:
seq_printf(f,
"%-31s %-8s "
"%-8s %-8s %-8s %-8s %-8s\n",
"name", "events", "elapsed",
"min", "max", "start", "stop");
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (-rc);
}
static int
kstat_seq_show_raw(struct seq_file *f, unsigned char *p, int l)
{
int i, j;
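	/* Emit the buffer as a 16-bytes-per-line hex dump with offsets */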
for (i = 0; ; i++) {
seq_printf(f, "%03x:", i);
for (j = 0; j < 16; j++) {
if (i * 16 + j >= l) {
seq_printf(f, "\n");
goto out;
}
seq_printf(f, " %02x", (unsigned char)p[i * 16 + j]);
}
seq_printf(f, "\n");
}
out:
return (0);
}
static int
kstat_seq_show_named(struct seq_file *f, kstat_named_t *knp)
{
seq_printf(f, "%-31s %-4d ", knp->name, knp->data_type);
switch (knp->data_type) {
case KSTAT_DATA_CHAR:
knp->value.c[15] = '\0'; /* NULL terminate */
seq_printf(f, "%-16s", knp->value.c);
break;
/*
	 * NOTE - We need to be more careful about what tokens are
	 * used for each arch; for now this is correct for x86_64.
*/
case KSTAT_DATA_INT32:
seq_printf(f, "%d", knp->value.i32);
break;
case KSTAT_DATA_UINT32:
seq_printf(f, "%u", knp->value.ui32);
break;
case KSTAT_DATA_INT64:
seq_printf(f, "%lld", (signed long long)knp->value.i64);
break;
case KSTAT_DATA_UINT64:
seq_printf(f, "%llu",
(unsigned long long)knp->value.ui64);
break;
case KSTAT_DATA_LONG:
seq_printf(f, "%ld", knp->value.l);
break;
case KSTAT_DATA_ULONG:
seq_printf(f, "%lu", knp->value.ul);
break;
case KSTAT_DATA_STRING:
KSTAT_NAMED_STR_PTR(knp)
[KSTAT_NAMED_STR_BUFLEN(knp)-1] = '\0';
seq_printf(f, "%s", KSTAT_NAMED_STR_PTR(knp));
break;
default:
PANIC("Undefined kstat data type %d\n", knp->data_type);
}
seq_printf(f, "\n");
return (0);
}
static int
kstat_seq_show_intr(struct seq_file *f, kstat_intr_t *kip)
{
seq_printf(f, "%-8u %-8u %-8u %-8u %-8u\n",
kip->intrs[KSTAT_INTR_HARD],
kip->intrs[KSTAT_INTR_SOFT],
kip->intrs[KSTAT_INTR_WATCHDOG],
kip->intrs[KSTAT_INTR_SPURIOUS],
kip->intrs[KSTAT_INTR_MULTSVC]);
return (0);
}
static int
kstat_seq_show_io(struct seq_file *f, kstat_io_t *kip)
{
/* though wlentime & friends are signed, they will never be negative */
seq_printf(f,
"%-8llu %-8llu %-8u %-8u %-8llu %-8llu "
"%-8llu %-8llu %-8llu %-8llu %-8u %-8u\n",
kip->nread, kip->nwritten,
kip->reads, kip->writes,
kip->wtime, kip->wlentime, kip->wlastupdate,
kip->rtime, kip->rlentime, kip->rlastupdate,
kip->wcnt, kip->rcnt);
return (0);
}
static int
kstat_seq_show_timer(struct seq_file *f, kstat_timer_t *ktp)
{
seq_printf(f,
"%-31s %-8llu %-8llu %-8llu %-8llu %-8llu %-8llu\n",
ktp->name, ktp->num_events, ktp->elapsed_time,
ktp->min_time, ktp->max_time,
ktp->start_time, ktp->stop_time);
return (0);
}
static int
kstat_seq_show(struct seq_file *f, void *p)
{
kstat_t *ksp = (kstat_t *)f->private;
int rc = 0;
ASSERT(ksp->ks_magic == KS_MAGIC);
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
restart:
if (ksp->ks_raw_ops.data) {
rc = ksp->ks_raw_ops.data(
ksp->ks_raw_buf, ksp->ks_raw_bufsize, p);
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart;
if (!rc)
seq_puts(f, ksp->ks_raw_buf);
} else {
ASSERT(ksp->ks_ndata == 1);
rc = kstat_seq_show_raw(f, ksp->ks_data,
ksp->ks_data_size);
}
break;
case KSTAT_TYPE_NAMED:
rc = kstat_seq_show_named(f, (kstat_named_t *)p);
break;
case KSTAT_TYPE_INTR:
rc = kstat_seq_show_intr(f, (kstat_intr_t *)p);
break;
case KSTAT_TYPE_IO:
rc = kstat_seq_show_io(f, (kstat_io_t *)p);
break;
case KSTAT_TYPE_TIMER:
rc = kstat_seq_show_timer(f, (kstat_timer_t *)p);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (-rc);
}
static int
kstat_default_update(kstat_t *ksp, int rw)
{
ASSERT(ksp != NULL);
if (rw == KSTAT_WRITE)
return (EACCES);
return (0);
}
static void *
kstat_seq_data_addr(kstat_t *ksp, loff_t n)
{
void *rc = NULL;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
if (ksp->ks_raw_ops.addr)
rc = ksp->ks_raw_ops.addr(ksp, n);
else
rc = ksp->ks_data;
break;
case KSTAT_TYPE_NAMED:
rc = ksp->ks_data + n * sizeof (kstat_named_t);
break;
case KSTAT_TYPE_INTR:
rc = ksp->ks_data + n * sizeof (kstat_intr_t);
break;
case KSTAT_TYPE_IO:
rc = ksp->ks_data + n * sizeof (kstat_io_t);
break;
case KSTAT_TYPE_TIMER:
rc = ksp->ks_data + n * sizeof (kstat_timer_t);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (rc);
}
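/*
 * seq_file iterator callbacks: start() takes ks_lock, refreshes the kstat
 * via ks_update(), and emits the headers; next() steps through the
 * ks_ndata records; stop() releases any raw buffer and drops the lock.
 */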
static void *
kstat_seq_start(struct seq_file *f, loff_t *pos)
{
loff_t n = *pos;
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
mutex_enter(ksp->ks_lock);
if (ksp->ks_type == KSTAT_TYPE_RAW) {
ksp->ks_raw_bufsize = PAGE_SIZE;
ksp->ks_raw_buf = vmem_alloc(ksp->ks_raw_bufsize, KM_SLEEP);
}
	/* Dynamically update the kstat; on error the existing data is used */
(void) ksp->ks_update(ksp, KSTAT_READ);
ksp->ks_snaptime = gethrtime();
if (!(ksp->ks_flags & KSTAT_FLAG_NO_HEADERS) && !n &&
kstat_seq_show_headers(f))
return (NULL);
if (n >= ksp->ks_ndata)
return (NULL);
return (kstat_seq_data_addr(ksp, n));
}
static void *
kstat_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
++*pos;
if (*pos >= ksp->ks_ndata)
return (NULL);
return (kstat_seq_data_addr(ksp, *pos));
}
static void
kstat_seq_stop(struct seq_file *f, void *v)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
if (ksp->ks_type == KSTAT_TYPE_RAW)
vmem_free(ksp->ks_raw_buf, ksp->ks_raw_bufsize);
mutex_exit(ksp->ks_lock);
}
static const struct seq_operations kstat_seq_ops = {
.show = kstat_seq_show,
.start = kstat_seq_start,
.next = kstat_seq_next,
.stop = kstat_seq_stop,
};
static kstat_module_t *
kstat_find_module(char *name)
{
kstat_module_t *module = NULL;
list_for_each_entry(module, &kstat_module_list, ksm_module_list) {
if (strncmp(name, module->ksm_name, KSTAT_STRLEN) == 0)
return (module);
}
return (NULL);
}
static kstat_module_t *
kstat_create_module(char *name)
{
kstat_module_t *module;
struct proc_dir_entry *pde;
pde = proc_mkdir(name, proc_spl_kstat);
if (pde == NULL)
return (NULL);
module = kmem_alloc(sizeof (kstat_module_t), KM_SLEEP);
module->ksm_proc = pde;
strlcpy(module->ksm_name, name, KSTAT_STRLEN);
INIT_LIST_HEAD(&module->ksm_kstat_list);
list_add_tail(&module->ksm_module_list, &kstat_module_list);
return (module);
}
static void
kstat_delete_module(kstat_module_t *module)
{
ASSERT(list_empty(&module->ksm_kstat_list));
remove_proc_entry(module->ksm_name, proc_spl_kstat);
list_del(&module->ksm_module_list);
kmem_free(module, sizeof (kstat_module_t));
}
static int
proc_kstat_open(struct inode *inode, struct file *filp)
{
struct seq_file *f;
int rc;
rc = seq_open(filp, &kstat_seq_ops);
if (rc)
return (rc);
f = filp->private_data;
f->private = SPL_PDE_DATA(inode);
return (0);
}
static ssize_t
proc_kstat_write(struct file *filp, const char __user *buf, size_t len,
loff_t *ppos)
{
struct seq_file *f = filp->private_data;
kstat_t *ksp = f->private;
int rc;
ASSERT(ksp->ks_magic == KS_MAGIC);
mutex_enter(ksp->ks_lock);
rc = ksp->ks_update(ksp, KSTAT_WRITE);
mutex_exit(ksp->ks_lock);
if (rc)
return (-rc);
*ppos += len;
return (len);
}
static const kstat_proc_op_t proc_kstat_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
.proc_open = proc_kstat_open,
.proc_write = proc_kstat_write,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
#else
.open = proc_kstat_open,
.write = proc_kstat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
#endif
};
void
__kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{
ksp->ks_raw_ops.headers = headers;
ksp->ks_raw_ops.data = data;
ksp->ks_raw_ops.addr = addr;
}
EXPORT_SYMBOL(__kstat_set_raw_ops);
void
kstat_proc_entry_init(kstat_proc_entry_t *kpep, const char *module,
const char *name)
{
kpep->kpe_owner = NULL;
kpep->kpe_proc = NULL;
INIT_LIST_HEAD(&kpep->kpe_list);
strlcpy(kpep->kpe_module, module, sizeof (kpep->kpe_module));
strlcpy(kpep->kpe_name, name, sizeof (kpep->kpe_name));
}
EXPORT_SYMBOL(kstat_proc_entry_init);
kstat_t *
__kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
const char *ks_class, uchar_t ks_type, uint_t ks_ndata,
uchar_t ks_flags)
{
kstat_t *ksp;
ASSERT(ks_module);
ASSERT(ks_instance == 0);
ASSERT(ks_name);
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
ASSERT(ks_ndata == 1);
ksp = kmem_zalloc(sizeof (*ksp), KM_SLEEP);
if (ksp == NULL)
return (ksp);
mutex_enter(&kstat_module_lock);
ksp->ks_kid = kstat_id;
kstat_id++;
mutex_exit(&kstat_module_lock);
ksp->ks_magic = KS_MAGIC;
mutex_init(&ksp->ks_private_lock, NULL, MUTEX_DEFAULT, NULL);
ksp->ks_lock = &ksp->ks_private_lock;
ksp->ks_crtime = gethrtime();
ksp->ks_snaptime = ksp->ks_crtime;
ksp->ks_instance = ks_instance;
strlcpy(ksp->ks_class, ks_class, sizeof (ksp->ks_class));
ksp->ks_type = ks_type;
ksp->ks_flags = ks_flags;
ksp->ks_update = kstat_default_update;
ksp->ks_private = NULL;
ksp->ks_raw_ops.headers = NULL;
ksp->ks_raw_ops.data = NULL;
ksp->ks_raw_ops.addr = NULL;
ksp->ks_raw_buf = NULL;
ksp->ks_raw_bufsize = 0;
kstat_proc_entry_init(&ksp->ks_proc, ks_module, ks_name);
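	/* Size the data area according to the kstat type */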
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
ksp->ks_ndata = 1;
ksp->ks_data_size = ks_ndata;
break;
case KSTAT_TYPE_NAMED:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_named_t);
break;
case KSTAT_TYPE_INTR:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_intr_t);
break;
case KSTAT_TYPE_IO:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_io_t);
break;
case KSTAT_TYPE_TIMER:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_timer_t);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
if (ksp->ks_flags & KSTAT_FLAG_VIRTUAL) {
ksp->ks_data = NULL;
} else {
ksp->ks_data = kmem_zalloc(ksp->ks_data_size, KM_SLEEP);
if (ksp->ks_data == NULL) {
kmem_free(ksp, sizeof (*ksp));
ksp = NULL;
}
}
return (ksp);
}
EXPORT_SYMBOL(__kstat_create);
static int
kstat_detect_collision(kstat_proc_entry_t *kpep)
{
kstat_module_t *module;
kstat_proc_entry_t *tmp = NULL;
char *parent;
char *cp;
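	/*
	 * A module name of the form "a/b" collides if "b" already exists
	 * as an entry under module "a", so split the name at the last
	 * slash and check the parent module's entry list.
	 */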
parent = kmem_asprintf("%s", kpep->kpe_module);
if ((cp = strrchr(parent, '/')) == NULL) {
kmem_strfree(parent);
return (0);
}
cp[0] = '\0';
if ((module = kstat_find_module(parent)) != NULL) {
list_for_each_entry(tmp, &module->ksm_kstat_list, kpe_list) {
if (strncmp(tmp->kpe_name, cp+1, KSTAT_STRLEN) == 0) {
kmem_strfree(parent);
return (EEXIST);
}
}
}
kmem_strfree(parent);
return (0);
}
/*
* Add a file to the proc filesystem under the kstat namespace (i.e.
* /proc/spl/kstat/). The file need not necessarily be implemented as a
* kstat.
*/
void
kstat_proc_entry_install(kstat_proc_entry_t *kpep, mode_t mode,
const kstat_proc_op_t *proc_ops, void *data)
{
kstat_module_t *module;
kstat_proc_entry_t *tmp = NULL;
ASSERT(kpep);
mutex_enter(&kstat_module_lock);
module = kstat_find_module(kpep->kpe_module);
if (module == NULL) {
if (kstat_detect_collision(kpep) != 0) {
cmn_err(CE_WARN, "kstat_create('%s', '%s'): namespace" \
" collision", kpep->kpe_module, kpep->kpe_name);
goto out;
}
module = kstat_create_module(kpep->kpe_module);
if (module == NULL)
goto out;
}
/*
	 * Only one entry by this name is allowed per module. On failure the
	 * module shouldn't be deleted because we know it has at least one entry.
*/
list_for_each_entry(tmp, &module->ksm_kstat_list, kpe_list) {
if (strncmp(tmp->kpe_name, kpep->kpe_name, KSTAT_STRLEN) == 0)
goto out;
}
list_add_tail(&kpep->kpe_list, &module->ksm_kstat_list);
kpep->kpe_owner = module;
kpep->kpe_proc = proc_create_data(kpep->kpe_name, mode,
module->ksm_proc, proc_ops, data);
if (kpep->kpe_proc == NULL) {
list_del_init(&kpep->kpe_list);
if (list_empty(&module->ksm_kstat_list))
kstat_delete_module(module);
}
out:
mutex_exit(&kstat_module_lock);
}
EXPORT_SYMBOL(kstat_proc_entry_install);
void
__kstat_install(kstat_t *ksp)
{
ASSERT(ksp);
mode_t mode;
/* Specify permission modes for different kstats */
if (strncmp(ksp->ks_proc.kpe_name, "dbufs", KSTAT_STRLEN) == 0) {
mode = 0600;
} else {
mode = 0644;
}
kstat_proc_entry_install(
&ksp->ks_proc, mode, &proc_kstat_operations, ksp);
}
EXPORT_SYMBOL(__kstat_install);
void
kstat_proc_entry_delete(kstat_proc_entry_t *kpep)
{
kstat_module_t *module = kpep->kpe_owner;
if (kpep->kpe_proc)
remove_proc_entry(kpep->kpe_name, module->ksm_proc);
mutex_enter(&kstat_module_lock);
list_del_init(&kpep->kpe_list);
/*
* Remove top level module directory if it wasn't empty before, but now
* is.
*/
if (kpep->kpe_proc && list_empty(&module->ksm_kstat_list))
kstat_delete_module(module);
mutex_exit(&kstat_module_lock);
}
EXPORT_SYMBOL(kstat_proc_entry_delete);
void
__kstat_delete(kstat_t *ksp)
{
kstat_proc_entry_delete(&ksp->ks_proc);
if (!(ksp->ks_flags & KSTAT_FLAG_VIRTUAL))
kmem_free(ksp->ks_data, ksp->ks_data_size);
ksp->ks_lock = NULL;
mutex_destroy(&ksp->ks_private_lock);
kmem_free(ksp, sizeof (*ksp));
}
EXPORT_SYMBOL(__kstat_delete);
int
spl_kstat_init(void)
{
mutex_init(&kstat_module_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&kstat_module_list);
kstat_id = 0;
return (0);
}
void
spl_kstat_fini(void)
{
ASSERT(list_empty(&kstat_module_list));
mutex_destroy(&kstat_module_lock);
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-shrinker.c b/sys/contrib/openzfs/module/os/linux/spl/spl-shrinker.c
new file mode 100644
index 000000000000..d5c8da471cbb
--- /dev/null
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-shrinker.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
+ * UCRL-CODE-235197
+ *
+ * This file is part of the SPL, Solaris Porting Layer.
+ *
+ * The SPL is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The SPL is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with the SPL. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Solaris Porting Layer (SPL) Shrinker Implementation.
+ */
+
+#include <sys/kmem.h>
+#include <sys/shrinker.h>
+
+#ifdef HAVE_SINGLE_SHRINKER_CALLBACK
+/* 3.0-3.11: single shrink() callback, which we wrap to carry both functions */
+struct spl_shrinker_wrap {
+ struct shrinker shrinker;
+ spl_shrinker_cb countfunc;
+ spl_shrinker_cb scanfunc;
+};
+
+static int
+spl_shrinker_single_cb(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct spl_shrinker_wrap *sw = (struct spl_shrinker_wrap *)shrinker;
+
+ if (sc->nr_to_scan != 0)
+ (void) sw->scanfunc(&sw->shrinker, sc);
+ return (sw->countfunc(&sw->shrinker, sc));
+}
+#endif
+
+struct shrinker *
+spl_register_shrinker(const char *name, spl_shrinker_cb countfunc,
+ spl_shrinker_cb scanfunc, int seek_cost)
+{
+ struct shrinker *shrinker;
+
+ /* allocate shrinker */
+#if defined(HAVE_SHRINKER_REGISTER)
+ /* 6.7: kernel will allocate the shrinker for us */
+ shrinker = shrinker_alloc(0, name);
+#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
+ /* 3.12-6.6: we allocate the shrinker */
+ shrinker = kmem_zalloc(sizeof (struct shrinker), KM_SLEEP);
+#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
+ /* 3.0-3.11: allocate a wrapper */
+ struct spl_shrinker_wrap *sw =
+ kmem_zalloc(sizeof (struct spl_shrinker_wrap), KM_SLEEP);
+ shrinker = &sw->shrinker;
+#else
+ /* 2.x-2.6.22, or a newer shrinker API has been introduced. */
+#error "Unknown shrinker API"
+#endif
+
+ if (shrinker == NULL)
+ return (NULL);
+
+ /* set callbacks */
+#ifdef HAVE_SINGLE_SHRINKER_CALLBACK
+ sw->countfunc = countfunc;
+ sw->scanfunc = scanfunc;
+ shrinker->shrink = spl_shrinker_single_cb;
+#else
+ shrinker->count_objects = countfunc;
+ shrinker->scan_objects = scanfunc;
+#endif
+
+ /* set params */
+ shrinker->seeks = seek_cost;
+
+ /* register with kernel */
+#if defined(HAVE_SHRINKER_REGISTER)
+ shrinker_register(shrinker);
+#elif defined(HAVE_REGISTER_SHRINKER_VARARG)
+ register_shrinker(shrinker, name);
+#else
+ register_shrinker(shrinker);
+#endif
+
+ return (shrinker);
+}
+EXPORT_SYMBOL(spl_register_shrinker);
+
+void
+spl_unregister_shrinker(struct shrinker *shrinker)
+{
+#if defined(HAVE_SHRINKER_REGISTER)
+ shrinker_free(shrinker);
+#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
+ unregister_shrinker(shrinker);
+ kmem_free(shrinker, sizeof (struct shrinker));
+#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
+ unregister_shrinker(shrinker);
+ kmem_free(shrinker, sizeof (struct spl_shrinker_wrap));
+#else
+#error "Unknown shrinker API"
+#endif
+}
+EXPORT_SYMBOL(spl_unregister_shrinker);
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
index b4ef86a5e4a6..ee3eb4690c3a 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
@@ -1,206 +1,207 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Thread Implementation.
*/
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
+#include <sys/string.h>
/*
* Thread interfaces
*/
typedef struct thread_priv_s {
unsigned long tp_magic; /* Magic */
int tp_name_size; /* Name size */
char *tp_name; /* Name (without _thread suffix) */
void (*tp_func)(void *); /* Registered function */
void *tp_args; /* Args to be passed to function */
size_t tp_len; /* Len to be passed to function */
int tp_state; /* State to start thread at */
	pri_t tp_pri; /* Priority to start thread at */
} thread_priv_t;
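/*
 * Trampoline run in the context of the new kthread: it applies the
 * requested state and priority, frees the thread_priv_t descriptor,
 * and then invokes the registered function.
 */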
static int
thread_generic_wrapper(void *arg)
{
thread_priv_t *tp = (thread_priv_t *)arg;
void (*func)(void *);
void *args;
ASSERT(tp->tp_magic == TP_MAGIC);
func = tp->tp_func;
args = tp->tp_args;
set_current_state(tp->tp_state);
set_user_nice((kthread_t *)current, PRIO_TO_NICE(tp->tp_pri));
kmem_free(tp->tp_name, tp->tp_name_size);
kmem_free(tp, sizeof (thread_priv_t));
if (func)
func(args);
return (0);
}
/*
* thread_create() may block forever if it cannot create a thread or
* allocate memory. This is preferable to returning a NULL which Solaris
* style callers likely never check for... since it can't fail.
*/
kthread_t *
__thread_create(caddr_t stk, size_t stksize, thread_func_t func,
const char *name, void *args, size_t len, proc_t *pp, int state, pri_t pri)
{
thread_priv_t *tp;
struct task_struct *tsk;
char *p;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
ASSERT(stk == NULL);
tp = kmem_alloc(sizeof (thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
return (NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_name_size = strlen(name) + 1;
tp->tp_name = kmem_alloc(tp->tp_name_size, KM_PUSHPAGE);
if (tp->tp_name == NULL) {
kmem_free(tp, sizeof (thread_priv_t));
return (NULL);
}
strlcpy(tp->tp_name, name, tp->tp_name_size);
/*
* Strip trailing "_thread" from passed name which will be the func
* name since the exposed API has no parameter for passing a name.
*/
p = strstr(tp->tp_name, "_thread");
if (p)
p[0] = '\0';
tp->tp_func = func;
tp->tp_args = args;
tp->tp_len = len;
tp->tp_state = state;
tp->tp_pri = pri;
tsk = spl_kthread_create(thread_generic_wrapper, (void *)tp,
"%s", tp->tp_name);
if (IS_ERR(tsk))
return (NULL);
wake_up_process(tsk);
return ((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
/*
* spl_kthread_create - Wrapper providing pre-3.13 semantics for
* kthread_create() in which it is not killable and less likely
* to return -ENOMEM.
*/
struct task_struct *
spl_kthread_create(int (*func)(void *), void *data, const char namefmt[], ...)
{
struct task_struct *tsk;
va_list args;
char name[TASK_COMM_LEN];
va_start(args, namefmt);
vsnprintf(name, sizeof (name), namefmt, args);
va_end(args);
do {
tsk = kthread_create(func, data, "%s", name);
if (IS_ERR(tsk)) {
if (signal_pending(current)) {
clear_thread_flag(TIF_SIGPENDING);
continue;
}
if (PTR_ERR(tsk) == -ENOMEM)
continue;
return (NULL);
} else {
return (tsk);
}
} while (1);
}
EXPORT_SYMBOL(spl_kthread_create);
/*
* The "why" argument indicates the allowable side-effects of the call:
*
* FORREAL: Extract the next pending signal from p_sig into p_cursig;
* stop the process if a stop has been requested or if a traced signal
* is pending.
*
* JUSTLOOKING: Don't stop the process, just indicate whether or not
* a signal might be pending (FORREAL is needed to tell for sure).
*/
int
issig(int why)
{
ASSERT(why == FORREAL || why == JUSTLOOKING);
if (!signal_pending(current))
return (0);
if (why != FORREAL)
return (1);
struct task_struct *task = current;
spl_kernel_siginfo_t __info;
sigset_t set;
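	/*
	 * Build a mask of every signal except SIGSTOP and SIGTSTP, then
	 * fold in the blocked set, so that dequeue_signal() below will
	 * only dequeue an unblocked stop signal and leave all other
	 * pending signals untouched.
	 */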
siginitsetinv(&set, 1ULL << (SIGSTOP - 1) | 1ULL << (SIGTSTP - 1));
sigorsets(&set, &task->blocked, &set);
spin_lock_irq(&task->sighand->siglock);
#ifdef HAVE_DEQUEUE_SIGNAL_4ARG
enum pid_type __type;
if (dequeue_signal(task, &set, &__info, &__type) != 0) {
#else
if (dequeue_signal(task, &set, &__info) != 0) {
#endif
#ifdef HAVE_SIGNAL_STOP
spin_unlock_irq(&task->sighand->siglock);
kernel_signal_stop();
#else
if (current->jobctl & JOBCTL_STOP_DEQUEUED)
spl_set_special_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
schedule();
#endif
return (0);
}
spin_unlock_irq(&task->sighand->siglock);
return (1);
}
EXPORT_SYMBOL(issig);
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c b/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
index e821fbb4f3a1..d0d0cca154a7 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-zone.c
@@ -1,424 +1,425 @@
/*
* Copyright (c) 2021 Klara Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <linux/file.h>
#include <linux/magic.h>
#include <sys/zone.h>
+#include <sys/string.h>
#if defined(CONFIG_USER_NS)
#include <linux/statfs.h>
#include <linux/proc_ns.h>
#endif
#include <sys/mutex.h>
static kmutex_t zone_datasets_lock;
static struct list_head zone_datasets;
typedef struct zone_datasets {
struct list_head zds_list; /* zone_datasets linkage */
struct user_namespace *zds_userns; /* namespace reference */
struct list_head zds_datasets; /* datasets for the namespace */
} zone_datasets_t;
typedef struct zone_dataset {
struct list_head zd_list; /* zone_dataset linkage */
size_t zd_dsnamelen; /* length of name */
char zd_dsname[]; /* name of the member dataset */
} zone_dataset_t;
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
/*
* Returns:
* - 0 on success
* - EBADF if it cannot open the provided file descriptor
 * - ENOTTY if the file itself is not a user namespace file. We want to
 * intercept this error in the ZFS layer. We cannot just return one of the
 * ZFS_ERR_* errors here as we want to preserve the separation of the ZFS
* and the SPL layers.
*/
static int
user_ns_get(int fd, struct user_namespace **userns)
{
struct kstatfs st;
struct file *nsfile;
struct ns_common *ns;
int error;
if ((nsfile = fget(fd)) == NULL)
return (EBADF);
if (vfs_statfs(&nsfile->f_path, &st) != 0) {
error = ENOTTY;
goto done;
}
if (st.f_type != NSFS_MAGIC) {
error = ENOTTY;
goto done;
}
ns = get_proc_ns(file_inode(nsfile));
if (ns->ops->type != CLONE_NEWUSER) {
error = ENOTTY;
goto done;
}
*userns = container_of(ns, struct user_namespace, ns);
error = 0;
done:
fput(nsfile);
return (error);
}
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
static unsigned int
user_ns_zoneid(struct user_namespace *user_ns)
{
unsigned int r;
#if defined(HAVE_USER_NS_COMMON_INUM)
r = user_ns->ns.inum;
#else
r = user_ns->proc_inum;
#endif
return (r);
}
static struct zone_datasets *
zone_datasets_lookup(unsigned int nsinum)
{
zone_datasets_t *zds;
list_for_each_entry(zds, &zone_datasets, zds_list) {
if (user_ns_zoneid(zds->zds_userns) == nsinum)
return (zds);
}
return (NULL);
}
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
static struct zone_dataset *
zone_dataset_lookup(zone_datasets_t *zds, const char *dataset, size_t dsnamelen)
{
zone_dataset_t *zd;
list_for_each_entry(zd, &zds->zds_datasets, zd_list) {
if (zd->zd_dsnamelen != dsnamelen)
continue;
if (strncmp(zd->zd_dsname, dataset, dsnamelen) == 0)
return (zd);
}
return (NULL);
}
static int
zone_dataset_cred_check(cred_t *cred)
{
if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
return (EPERM);
return (0);
}
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
static int
zone_dataset_name_check(const char *dataset, size_t *dsnamelen)
{
if (dataset[0] == '\0' || dataset[0] == '/')
return (ENOENT);
*dsnamelen = strlen(dataset);
/* Ignore trailing slash, if supplied. */
if (dataset[*dsnamelen - 1] == '/')
(*dsnamelen)--;
return (0);
}
int
zone_dataset_attach(cred_t *cred, const char *dataset, int userns_fd)
{
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
struct user_namespace *userns;
zone_datasets_t *zds;
zone_dataset_t *zd;
int error;
size_t dsnamelen;
if ((error = zone_dataset_cred_check(cred)) != 0)
return (error);
if ((error = zone_dataset_name_check(dataset, &dsnamelen)) != 0)
return (error);
if ((error = user_ns_get(userns_fd, &userns)) != 0)
return (error);
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(user_ns_zoneid(userns));
if (zds == NULL) {
zds = kmem_alloc(sizeof (zone_datasets_t), KM_SLEEP);
INIT_LIST_HEAD(&zds->zds_list);
INIT_LIST_HEAD(&zds->zds_datasets);
zds->zds_userns = userns;
/*
		 * Lock the namespace by increasing its refcount to prevent
* the namespace ID from being reused.
*/
get_user_ns(userns);
list_add_tail(&zds->zds_list, &zone_datasets);
} else {
zd = zone_dataset_lookup(zds, dataset, dsnamelen);
if (zd != NULL) {
mutex_exit(&zone_datasets_lock);
return (EEXIST);
}
}
zd = kmem_alloc(sizeof (zone_dataset_t) + dsnamelen + 1, KM_SLEEP);
zd->zd_dsnamelen = dsnamelen;
strlcpy(zd->zd_dsname, dataset, dsnamelen + 1);
INIT_LIST_HEAD(&zd->zd_list);
list_add_tail(&zd->zd_list, &zds->zds_datasets);
mutex_exit(&zone_datasets_lock);
return (0);
#else
return (ENXIO);
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
}
EXPORT_SYMBOL(zone_dataset_attach);
int
zone_dataset_detach(cred_t *cred, const char *dataset, int userns_fd)
{
#if defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM)
struct user_namespace *userns;
zone_datasets_t *zds;
zone_dataset_t *zd;
int error;
size_t dsnamelen;
if ((error = zone_dataset_cred_check(cred)) != 0)
return (error);
if ((error = zone_dataset_name_check(dataset, &dsnamelen)) != 0)
return (error);
if ((error = user_ns_get(userns_fd, &userns)) != 0)
return (error);
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(user_ns_zoneid(userns));
if (zds != NULL)
zd = zone_dataset_lookup(zds, dataset, dsnamelen);
if (zds == NULL || zd == NULL) {
mutex_exit(&zone_datasets_lock);
return (ENOENT);
}
list_del(&zd->zd_list);
kmem_free(zd, sizeof (*zd) + zd->zd_dsnamelen + 1);
/* Prune the namespace entry if it has no more delegations. */
if (list_empty(&zds->zds_datasets)) {
/*
* Decrease the refcount now that the namespace is no longer
* used. It is no longer necessary to prevent the namespace ID
* from being reused.
*/
put_user_ns(userns);
list_del(&zds->zds_list);
kmem_free(zds, sizeof (*zds));
}
mutex_exit(&zone_datasets_lock);
return (0);
#else
return (ENXIO);
#endif /* defined(CONFIG_USER_NS) && defined(HAVE_USER_NS_COMMON_INUM) */
}
EXPORT_SYMBOL(zone_dataset_detach);
/*
* A dataset is visible if:
* - It is a parent of a namespace entry.
* - It is one of the namespace entries.
* - It is a child of a namespace entry.
*
* A dataset is writable if:
* - It is one of the namespace entries.
* - It is a child of a namespace entry.
*
* The parent datasets of namespace entries are visible and
* read-only to provide a path back to the root of the pool.
*/
int
zone_dataset_visible(const char *dataset, int *write)
{
zone_datasets_t *zds;
zone_dataset_t *zd;
size_t dsnamelen, zd_len;
int visible;
	/* Default to read-only, in case the dataset is visible but not writable. */
if (write != NULL)
*write = 0;
if (zone_dataset_name_check(dataset, &dsnamelen) != 0)
return (0);
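	/* The global zone can see and write every dataset */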
if (INGLOBALZONE(curproc)) {
if (write != NULL)
*write = 1;
return (1);
}
mutex_enter(&zone_datasets_lock);
zds = zone_datasets_lookup(crgetzoneid(curproc->cred));
if (zds == NULL) {
mutex_exit(&zone_datasets_lock);
return (0);
}
visible = 0;
list_for_each_entry(zd, &zds->zds_datasets, zd_list) {
zd_len = strlen(zd->zd_dsname);
if (zd_len > dsnamelen) {
/*
* The name of the namespace entry is longer than that
* of the dataset, so it could be that the dataset is a
* parent of the namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset,
dsnamelen) == 0 &&
zd->zd_dsname[dsnamelen] == '/';
if (visible)
break;
} else if (zd_len == dsnamelen) {
/*
* The name of the namespace entry is as long as that
* of the dataset, so perhaps the dataset itself is the
* namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset, zd_len) == 0;
if (visible) {
if (write != NULL)
*write = 1;
break;
}
} else {
/*
* The name of the namespace entry is shorter than that
* of the dataset, so perhaps the dataset is a child of
* the namespace entry.
*/
visible = memcmp(zd->zd_dsname, dataset,
zd_len) == 0 && dataset[zd_len] == '/';
if (visible) {
if (write != NULL)
*write = 1;
break;
}
}
}
mutex_exit(&zone_datasets_lock);
return (visible);
}
EXPORT_SYMBOL(zone_dataset_visible);
unsigned int
global_zoneid(void)
{
unsigned int z = 0;
#if defined(CONFIG_USER_NS)
z = user_ns_zoneid(&init_user_ns);
#endif
return (z);
}
EXPORT_SYMBOL(global_zoneid);
unsigned int
crgetzoneid(const cred_t *cr)
{
unsigned int r = 0;
#if defined(CONFIG_USER_NS)
r = user_ns_zoneid(cr->user_ns);
#endif
return (r);
}
EXPORT_SYMBOL(crgetzoneid);
boolean_t
inglobalzone(proc_t *proc)
{
#if defined(CONFIG_USER_NS)
return (proc->cred->user_ns == &init_user_ns);
#else
return (B_TRUE);
#endif
}
EXPORT_SYMBOL(inglobalzone);
int
spl_zone_init(void)
{
mutex_init(&zone_datasets_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&zone_datasets);
return (0);
}
void
spl_zone_fini(void)
{
zone_datasets_t *zds;
zone_dataset_t *zd;
/*
* It would be better to assert an empty zone_datasets, but since
* there's no automatic mechanism for cleaning them up if the user
* namespace is destroyed, just do it here, since spl is about to go
* out of context.
*/
while (!list_empty(&zone_datasets)) {
zds = list_entry(zone_datasets.next, zone_datasets_t, zds_list);
while (!list_empty(&zds->zds_datasets)) {
zd = list_entry(zds->zds_datasets.next,
zone_dataset_t, zd_list);
list_del(&zd->zd_list);
kmem_free(zd, sizeof (*zd) + zd->zd_dsnamelen + 1);
}
put_user_ns(zds->zds_userns);
list_del(&zds->zds_list);
kmem_free(zds, sizeof (*zds));
}
mutex_destroy(&zone_datasets_lock);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index 13150adbe0cf..24390fbbf125 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -1,1157 +1,1166 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* See abd.c for a general overview of the arc buffered data (ABD).
*
* Linear buffers act exactly like normal buffers and are always mapped into the
* kernel's virtual memory space, while scattered ABD data chunks are allocated
* as physical pages and then mapped in only while they are actually being
* accessed through one of the abd_* library functions. Using scattered ABDs
* provides several benefits:
*
* (1) They avoid use of kmem_*, preventing performance problems where running
* kmem_reap on very large memory systems never finishes and causes
* constant TLB shootdowns.
*
* (2) Fragmentation is less of an issue since when we are at the limit of
* allocatable space, we won't have to search around for a long free
* hole in the VA space for large ARC allocations. Each chunk is mapped in
* individually, so even if we are using HIGHMEM (see next point) we
* wouldn't need to worry about finding a contiguous address range.
*
* (3) If we are not using HIGHMEM, then all physical memory is always
* mapped into the kernel's address space, so we also avoid the map /
* unmap costs on each ABD access.
*
* If we are not using HIGHMEM, scattered buffers which have only one chunk
* can be treated as linear buffers, because they are contiguous in the
* kernel's virtual address space. See abd_alloc_chunks() for details.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
+#endif
+
+#ifdef _KERNEL
+#if defined(MAX_ORDER)
+#define ABD_MAX_ORDER (MAX_ORDER)
+#elif defined(MAX_PAGE_ORDER)
+#define ABD_MAX_ORDER (MAX_PAGE_ORDER)
+#endif
#else
-#define MAX_ORDER 1
+#define ABD_MAX_ORDER (1)
#endif
typedef struct abd_stats {
kstat_named_t abdstat_struct_size;
kstat_named_t abdstat_linear_cnt;
kstat_named_t abdstat_linear_data_size;
kstat_named_t abdstat_scatter_cnt;
kstat_named_t abdstat_scatter_data_size;
kstat_named_t abdstat_scatter_chunk_waste;
- kstat_named_t abdstat_scatter_orders[MAX_ORDER];
+ kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
kstat_named_t abdstat_scatter_page_multi_chunk;
kstat_named_t abdstat_scatter_page_multi_zone;
kstat_named_t abdstat_scatter_page_alloc_retry;
kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
/* Amount of memory occupied by all of the abd_t struct allocations */
{ "struct_size", KSTAT_DATA_UINT64 },
/*
* The number of linear ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset() and abd_get_from_buf()). If an
* ABD takes ownership of its buf then it will become tracked.
*/
{ "linear_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all linear ABDs tracked by linear_cnt */
{ "linear_data_size", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset()).
*/
{ "scatter_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
{ "scatter_data_size", KSTAT_DATA_UINT64 },
/*
* The amount of space wasted at the end of the last chunk across all
* scatter ABDs tracked by scatter_cnt.
*/
{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
/*
* The number of compound allocations of a given order. These
* allocations are spread over all currently allocated ABDs, and
* act as a measure of memory fragmentation.
*/
{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
/*
* The number of scatter ABDs which contain multiple chunks.
* ABDs are preferentially allocated from the minimum number of
 * contiguous multi-page chunks; a single chunk is optimal.
*/
{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are split across memory zones.
* ABDs are preferentially allocated using pages from a single zone.
*/
{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
/*
* The total number of retries encountered when attempting to
* allocate the pages to populate the scatter ABD.
*/
{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
/*
* The total number of retries encountered when attempting to
* allocate the sg table for an ABD.
*/
{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};
static struct {
wmsum_t abdstat_struct_size;
wmsum_t abdstat_linear_cnt;
wmsum_t abdstat_linear_data_size;
wmsum_t abdstat_scatter_cnt;
wmsum_t abdstat_scatter_data_size;
wmsum_t abdstat_scatter_chunk_waste;
- wmsum_t abdstat_scatter_orders[MAX_ORDER];
+ wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
wmsum_t abdstat_scatter_page_multi_chunk;
wmsum_t abdstat_scatter_page_multi_zone;
wmsum_t abdstat_scatter_page_alloc_retry;
wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;
#define abd_for_each_sg(abd, sg, n, i) \
for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
/*
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's. Smaller allocations will use linear ABD's, which use
* zio_[data_]buf_alloc().
*
* Scatter ABD's use at least one page each, so sub-page allocations waste
* some space when allocated as scatter (e.g. 2KB scatter allocation wastes
* half of each page). Using linear ABD's for small allocations means that
* they will be put on slabs which contain many allocations. This can
* improve memory efficiency, but it also makes it much harder for ARC
* evictions to actually free pages, because all the buffers on one slab need
* to be freed in order for the slab (and underlying pages) to be freed.
* Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
* possible for them to actually waste more memory than scatter (one page per
* buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
*
* Spill blocks are typically 512B and are heavily used on systems running
* selinux with the default dnode size and the `xattr=sa` property set.
*
* By default we use linear allocations for 512B and 1KB, and scatter
* allocations for larger (1.5KB and up).
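 *
 * To make the trade-off concrete (assuming 4K pages): a 512B scatter
 * allocation occupies a whole page, wasting 7/8 of it. The same 512B
 * linear allocation shares a 16-buffer slab, but the slab's pages only
 * free once all 16 buffers are freed, so a single long-lived buffer
 * can pin the whole 8KB slab (15/16 wasted).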
*/
static int zfs_abd_scatter_min_size = 512 * 3;
/*
* We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
* just a single zero'd page. This allows us to conserve memory by
* only using a single zero page for the scatterlist.
*/
abd_t *abd_zero_scatter = NULL;
struct page;
/*
* _KERNEL - Will point to ZERO_PAGE if it is available or it will be
* an allocated zero'd PAGESIZE buffer.
* Userspace - Will be an allocated zero'ed PAGESIZE buffer.
*
* abd_zero_page is assigned to each of the pages of abd_zero_scatter.
*/
static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
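/*
 * Number of whole pages needed to back a buffer of the given size.
 * For example, with 4K pages a 6000-byte request rounds up to 8192
 * bytes and therefore requires two chunks.
 */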
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
abd_t *
abd_alloc_struct_impl(size_t size)
{
/*
* In Linux we do not use the size passed in during ABD
* allocation, so we just ignore it.
*/
(void) size;
abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
return (abd);
}
void
abd_free_struct_impl(abd_t *abd)
{
kmem_cache_free(abd_cache, abd);
ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
#ifdef _KERNEL
-static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
+static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
/*
* Mark zfs data pages so they can be excluded from kernel crash dumps
*/
#ifdef _LP64
#define ABD_FILE_CACHE_PAGE 0x2F5ABDF11ECAC4E
static inline void
abd_mark_zfs_page(struct page *page)
{
get_page(page);
SetPagePrivate(page);
set_page_private(page, ABD_FILE_CACHE_PAGE);
}
static inline void
abd_unmark_zfs_page(struct page *page)
{
set_page_private(page, 0UL);
ClearPagePrivate(page);
put_page(page);
}
#else
#define abd_mark_zfs_page(page)
#define abd_unmark_zfs_page(page)
#endif /* _LP64 */
#ifndef CONFIG_HIGHMEM
#ifndef __GFP_RECLAIM
#define __GFP_RECLAIM __GFP_WAIT
#endif
/*
* The goal is to minimize fragmentation by preferentially populating ABDs
* with higher order compound pages from a single zone. Allocation size is
* progressively decreased until it can be satisfied without performing
* reclaim or compaction. When necessary this function will degenerate to
* allocating individual pages and allowing reclaim to satisfy allocations.
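 *
 * As a worked example (assuming an unconstrained max_order): a request
 * for 10 pages first computes order = highbit64(10) - 1 = 3, covering
 * 8 pages with one compound page, then order 1 for the remaining 2.
 * If the order-3 allocation fails, max_order drops to 2 and the loop
 * retries with progressively smaller chunks until all pages are
 * allocated.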
*/
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
struct list_head pages;
struct sg_table table;
struct scatterlist *sg;
struct page *page, *tmp_page = NULL;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
- unsigned int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
+ unsigned int max_order = MIN(zfs_abd_scatter_max_order,
+ ABD_MAX_ORDER - 1);
unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
unsigned int chunks = 0, zones = 0;
size_t remaining_size;
int nid = NUMA_NO_NODE;
unsigned int alloc_pages = 0;
INIT_LIST_HEAD(&pages);
ASSERT3U(alloc_pages, <, nr_pages);
while (alloc_pages < nr_pages) {
unsigned int chunk_pages;
unsigned int order;
order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
chunk_pages = (1U << order);
page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
if (page == NULL) {
if (order == 0) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
} else {
max_order = MAX(0, order - 1);
}
continue;
}
list_add_tail(&page->lru, &pages);
if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
zones++;
nid = page_to_nid(page);
ABDSTAT_BUMP(abdstat_scatter_orders[order]);
chunks++;
alloc_pages += chunk_pages;
}
ASSERT3S(alloc_pages, ==, nr_pages);
while (sg_alloc_table(&table, chunks, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
sg = table.sgl;
remaining_size = size;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
size_t sg_size = MIN(PAGESIZE << compound_order(page),
remaining_size);
sg_set_page(sg, page, sg_size, 0);
abd_mark_zfs_page(page);
remaining_size -= sg_size;
sg = sg_next(sg);
list_del(&page->lru);
}
/*
* These conditions ensure that a possible transformation to a linear
* ABD would be valid.
*/
ASSERT(!PageHighMem(sg_page(table.sgl)));
ASSERT0(ABD_SCATTER(abd).abd_offset);
if (table.nents == 1) {
/*
* Since there is only one entry, this ABD can be represented
* as a linear buffer. All single-page (4K) ABD's can be
* represented this way. Some multi-page ABD's can also be
* represented this way, if we were able to allocate a single
* "chunk" (higher-order "page" which represents a power-of-2
* series of physically-contiguous pages). This is often the
* case for 2-page (8K) ABD's.
*
* Representing a single-entry scatter ABD as a linear ABD
* has the performance advantage of avoiding the copy (and
* allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
* A performance increase of around 5% has been observed for
* ARC-cached reads (of small blocks which can take advantage
* of this).
*
* Note that this optimization is only possible because the
* pages are always mapped into the kernel's address space.
* This is not the case for highmem pages, so the
* optimization can not be made there.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
abd->abd_u.abd_linear.abd_sgl = table.sgl;
ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
} else if (table.nents > 1) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
if (zones) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
}
ABD_SCATTER(abd).abd_sgl = table.sgl;
ABD_SCATTER(abd).abd_nents = table.nents;
}
}
#else
/*
* Allocate N individual pages to construct a scatter ABD. This function
* makes no attempt to request contiguous pages and requires the minimal
* number of kernel interfaces. It's designed for maximum compatibility.
*/
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
struct scatterlist *sg = NULL;
struct sg_table table;
struct page *page;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
int nr_pages = abd_chunkcnt_for_bytes(size);
int i = 0;
while (sg_alloc_table(&table, nr_pages, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
ASSERT3U(table.nents, ==, nr_pages);
ABD_SCATTER(abd).abd_sgl = table.sgl;
ABD_SCATTER(abd).abd_nents = nr_pages;
abd_for_each_sg(abd, sg, nr_pages, i) {
while ((page = __page_cache_alloc(gfp)) == NULL) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
}
ABDSTAT_BUMP(abdstat_scatter_orders[0]);
sg_set_page(sg, page, PAGESIZE, 0);
abd_mark_zfs_page(page);
}
if (nr_pages > 1) {
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
}
}
#endif /* !CONFIG_HIGHMEM */
/*
* This must be called if any of the sg_table allocation functions
* are called.
*/
static void
abd_free_sg_table(abd_t *abd)
{
struct sg_table table;
table.sgl = ABD_SCATTER(abd).abd_sgl;
table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
sg_free_table(&table);
}
void
abd_free_chunks(abd_t *abd)
{
struct scatterlist *sg = NULL;
struct page *page;
int nr_pages = ABD_SCATTER(abd).abd_nents;
int order, i = 0;
if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);
if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
abd_for_each_sg(abd, sg, nr_pages, i) {
page = sg_page(sg);
abd_unmark_zfs_page(page);
order = compound_order(page);
__free_pages(page, order);
ASSERT3U(sg->length, <=, PAGE_SIZE << order);
ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
}
abd_free_sg_table(abd);
}
/*
* Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
* the scatterlist will be set to the zero'd out buffer abd_zero_page.
*/
static void
abd_alloc_zero_scatter(void)
{
struct scatterlist *sg = NULL;
struct sg_table table;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
int i = 0;
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
gfp_t gfp_zero_page = gfp | __GFP_ZERO;
while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
}
abd_mark_zfs_page(abd_zero_page);
#else
abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
while (sg_alloc_table(&table, nr_pages, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
schedule_timeout_interruptible(1);
}
ASSERT3U(table.nents, ==, nr_pages);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
#else /* _KERNEL */
#ifndef PAGE_SHIFT
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif
#define zfs_kmap_atomic(chunk) ((void *)chunk)
#define zfs_kunmap_atomic(addr) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
#define local_irq_restore(flags) do { (void)(flags); } while (0)
#define nth_page(pg, i) \
((struct page *)((void *)(pg) + (i) * PAGESIZE))
struct scatterlist {
struct page *page;
int length;
int end;
};
static void
sg_init_table(struct scatterlist *sg, int nr)
{
memset(sg, 0, nr * sizeof (struct scatterlist));
sg[nr - 1].end = 1;
}
/*
* This must be called if any of the sg_table allocation functions
* are called.
*/
static void
abd_free_sg_table(abd_t *abd)
{
int nents = ABD_SCATTER(abd).abd_nents;
vmem_free(ABD_SCATTER(abd).abd_sgl,
nents * sizeof (struct scatterlist));
}
#define for_each_sg(sgl, sg, nr, i) \
for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))
static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
unsigned int offset)
{
/* currently we don't use offset */
ASSERT(offset == 0);
sg->page = page;
sg->length = len;
}
static inline struct page *
sg_page(struct scatterlist *sg)
{
return (sg->page);
}
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
if (sg->end)
return (NULL);
return (sg + 1);
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
unsigned nr_pages = abd_chunkcnt_for_bytes(size);
struct scatterlist *sg;
int i;
ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
sizeof (struct scatterlist), KM_SLEEP);
sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);
abd_for_each_sg(abd, sg, nr_pages, i) {
struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
sg_set_page(sg, p, PAGESIZE, 0);
}
ABD_SCATTER(abd).abd_nents = nr_pages;
}
void
abd_free_chunks(abd_t *abd)
{
int i, n = ABD_SCATTER(abd).abd_nents;
struct scatterlist *sg;
abd_for_each_sg(abd, sg, n, i) {
struct page *p = nth_page(sg_page(sg), 0);
umem_free_aligned(p, PAGESIZE);
}
abd_free_sg_table(abd);
}
static void
abd_alloc_zero_scatter(void)
{
unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
struct scatterlist *sg;
int i;
abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
memset(abd_zero_page, 0, PAGESIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
sizeof (struct scatterlist), KM_SLEEP);
sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
#endif /* _KERNEL */
boolean_t
abd_size_alloc_linear(size_t size)
{
return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
} else {
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
}
}
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
} else {
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
}
void
abd_verify_scatter(abd_t *abd)
{
size_t n;
int i = 0;
struct scatterlist *sg = NULL;
ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
ABD_SCATTER(abd).abd_sgl->length);
n = ABD_SCATTER(abd).abd_nents;
abd_for_each_sg(abd, sg, n, i) {
ASSERT3P(sg_page(sg), !=, NULL);
}
}
static void
abd_free_zero_scatter(void)
{
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
abd_free_sg_table(abd_zero_scatter);
abd_free_struct(abd_zero_scatter);
abd_zero_scatter = NULL;
ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
abd_unmark_zfs_page(abd_zero_page);
__free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
umem_free_aligned(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
abd_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
as->abdstat_struct_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_struct_size);
as->abdstat_linear_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_cnt);
as->abdstat_linear_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_data_size);
as->abdstat_scatter_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_cnt);
as->abdstat_scatter_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_data_size);
as->abdstat_scatter_chunk_waste.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
- for (int i = 0; i < MAX_ORDER; i++) {
+ for (int i = 0; i < ABD_MAX_ORDER; i++) {
as->abdstat_scatter_orders[i].value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
}
as->abdstat_scatter_page_multi_chunk.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
as->abdstat_scatter_page_multi_zone.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
as->abdstat_scatter_page_alloc_retry.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
as->abdstat_scatter_sg_table_retry.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
return (0);
}
void
abd_init(void)
{
int i;
abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
wmsum_init(&abd_sums.abdstat_struct_size, 0);
wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
- for (i = 0; i < MAX_ORDER; i++)
+ for (i = 0; i < ABD_MAX_ORDER; i++)
wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
- for (i = 0; i < MAX_ORDER; i++) {
+ for (i = 0; i < ABD_MAX_ORDER; i++) {
snprintf(abd_stats.abdstat_scatter_orders[i].name,
KSTAT_STRLEN, "scatter_order_%d", i);
abd_stats.abdstat_scatter_orders[i].data_type =
KSTAT_DATA_UINT64;
}
abd_ksp->ks_data = &abd_stats;
abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
abd_free_zero_scatter();
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
}
wmsum_fini(&abd_sums.abdstat_struct_size);
wmsum_fini(&abd_sums.abdstat_linear_cnt);
wmsum_fini(&abd_sums.abdstat_linear_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_cnt);
wmsum_fini(&abd_sums.abdstat_scatter_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
- for (int i = 0; i < MAX_ORDER; i++)
+ for (int i = 0; i < ABD_MAX_ORDER; i++)
wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
if (abd_cache) {
kmem_cache_destroy(abd_cache);
abd_cache = NULL;
}
}
void
abd_free_linear_page(abd_t *abd)
{
/* Transform it back into a scatter ABD for freeing */
struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
abd->abd_flags &= ~ABD_FLAG_LINEAR;
abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
ABD_SCATTER(abd).abd_nents = 1;
ABD_SCATTER(abd).abd_offset = 0;
ABD_SCATTER(abd).abd_sgl = sg;
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* If we're going to use this ABD for doing I/O using the block layer, the
* consumer of the ABD data doesn't care if it's scattered or not, and we don't
* plan to store this ABD in memory for a long period of time, we should
* allocate the ABD type that requires the least data copying to do the I/O.
*
* On Linux the optimal thing to do would be to use abd_get_offset() and
* construct a new ABD which shares the original pages thereby eliminating
* the copy. But for the moment a new linear ABD is allocated until this
* performance optimization can be implemented.
*/
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
return (abd_alloc(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
size_t size)
{
(void) size;
int i = 0;
struct scatterlist *sg = NULL;
abd_verify(sabd);
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
if (new_offset < sg->length)
break;
new_offset -= sg->length;
}
ABD_SCATTER(abd).abd_sgl = sg;
ABD_SCATTER(abd).abd_offset = new_offset;
ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
return (abd);
}
/*
* Initialize the abd_iter.
*/
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
aiter->iter_pos = 0;
if (abd_is_linear(abd)) {
aiter->iter_offset = 0;
aiter->iter_sg = NULL;
} else {
aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
}
}
/*
* This is just a helper function to see if we have exhausted the
* abd_iter and reached the end.
*/
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
* Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can safely be called when the aiter has already been
 * exhausted, in which case this does nothing.
*/
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
if (abd_iter_at_end(aiter))
return;
aiter->iter_pos += amount;
aiter->iter_offset += amount;
if (!abd_is_linear(aiter->iter_abd)) {
while (aiter->iter_offset >= aiter->iter_sg->length) {
aiter->iter_offset -= aiter->iter_sg->length;
aiter->iter_sg = sg_next(aiter->iter_sg);
if (aiter->iter_sg == NULL) {
ASSERT0(aiter->iter_offset);
break;
}
}
}
}
/*
* Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
*/
void
abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
size_t offset = 0;
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
if (abd_iter_at_end(aiter))
return;
if (abd_is_linear(aiter->iter_abd)) {
ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
offset = aiter->iter_offset;
aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
paddr = ABD_LINEAR_BUF(aiter->iter_abd);
} else {
offset = aiter->iter_offset;
aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
aiter->iter_abd->abd_size - aiter->iter_pos);
paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
}
aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
* Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
*/
void
abd_iter_unmap(struct abd_iter *aiter)
{
/* There's nothing left to unmap, so do nothing */
if (abd_iter_at_end(aiter))
return;
if (!abd_is_linear(aiter->iter_abd)) {
/* LINTED E_FUNC_SET_NOT_USED */
zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
}
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
ASSERT3U(aiter->iter_mapsize, >, 0);
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
}
#if defined(_KERNEL)
/*
* bio_nr_pages for ABD.
* @off is the offset in @abd
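 * Returns the number of pages spanned by the byte range
 * [pos, pos + size); for example (assuming 4K pages), a 1K read
 * starting 512 bytes before a page boundary straddles it and counts
 * as two pages.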
*/
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
unsigned long pos;
if (abd_is_gang(abd)) {
unsigned long count = 0;
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
cabd != NULL && size != 0;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT3U(off, <, cabd->abd_size);
int mysize = MIN(size, cabd->abd_size - off);
count += abd_nr_pages_off(cabd, mysize, off);
size -= mysize;
off = 0;
}
return (count);
}
if (abd_is_linear(abd))
pos = (unsigned long)abd_to_buf(abd) + off;
else
pos = ABD_SCATTER(abd).abd_offset + off;
return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
(pos >> PAGE_SHIFT));
}
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
unsigned int offset, size, i;
struct page *page;
offset = offset_in_page(buf_ptr);
for (i = 0; i < bio->bi_max_vecs; i++) {
size = PAGE_SIZE - offset;
if (bio_size <= 0)
break;
if (size > bio_size)
size = bio_size;
if (is_vmalloc_addr(buf_ptr))
page = vmalloc_to_page(buf_ptr);
else
page = virt_to_page(buf_ptr);
/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well with 0-count pages; this is a
		 * safety net to catch them.
*/
ASSERT3S(page_count(page), >, 0);
if (bio_add_page(bio, page, size, offset) != size)
break;
buf_ptr += size;
bio_size -= size;
offset = 0;
}
return (bio_size);
}
/*
* bio_map for gang ABD.
*/
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
ASSERT(abd_is_gang(abd));
for (abd_t *cabd = abd_gang_get_offset(abd, &off);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT3U(off, <, cabd->abd_size);
int size = MIN(io_size, cabd->abd_size - off);
int remainder = abd_bio_map_off(bio, cabd, size, off);
io_size -= (size - remainder);
if (io_size == 0 || remainder > 0)
return (io_size);
off = 0;
}
ASSERT0(io_size);
return (io_size);
}
/*
* bio_map for ABD.
* @off is the offset in @abd
* Remaining IO size is returned
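 * (A non-zero result means the bio ran out of vector slots before
 * io_size bytes were mapped; the caller is expected to submit the
 * remainder through another bio.)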
*/
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
struct abd_iter aiter;
ASSERT3U(io_size, <=, abd->abd_size - off);
if (abd_is_linear(abd))
return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
ASSERT(!abd_is_linear(abd));
if (abd_is_gang(abd))
return (abd_gang_bio_map_off(bio, abd, io_size, off));
abd_iter_init(&aiter, abd);
abd_iter_advance(&aiter, off);
for (int i = 0; i < bio->bi_max_vecs; i++) {
struct page *pg;
size_t len, sgoff, pgoff;
struct scatterlist *sg;
if (io_size <= 0)
break;
sg = aiter.iter_sg;
sgoff = aiter.iter_offset;
pgoff = sgoff & (PAGESIZE - 1);
len = MIN(io_size, PAGESIZE - pgoff);
ASSERT(len > 0);
pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
if (bio_add_page(bio, pg, len, pgoff) != len)
break;
io_size -= len;
abd_iter_advance(&aiter, len);
}
return (io_size);
}
/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
"Maximum order allocation used for a scatter ABD.");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
index 43ed087e2dbb..1fa9f3eb3f5b 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
@@ -1,493 +1,496 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
/*
* This is a limit on how many pages the ARC shrinker makes available for
* eviction in response to one page allocation attempt. Note that in
* practice, the kernel's shrinker can ask us to evict up to about 4x this
* for one allocation attempt.
*
* The default limit of 10,000 (in practice, 160MB per allocation attempt
* with 4K pages) limits the amount of time spent attempting to reclaim ARC
* memory to less than 100ms per allocation attempt, even with a small
* average compressed block size of ~8KB.
*
* See also the comment in arc_shrinker_count().
* Set to 0 to disable limit.
*/
int zfs_arc_shrinker_limit = 10000;
#ifdef CONFIG_MEMORY_HOTPLUG
static struct notifier_block arc_hotplug_callback_mem_nb;
#endif
/*
* Return a default max arc size based on the amount of physical memory.
*/
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
/* Default to 1/2 of all memory. */
return (MAX(allmem / 2, min));
}
#ifdef _KERNEL
/*
* Return maximum amount of memory that we could possibly use. Reduced
 * to half of all memory in user space, which is primarily used for testing.
*/
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}
/*
 * Return the amount of memory that is considered free. In user space,
 * which is primarily used for testing, we pretend that free memory ranges
* from 0-20% of all memory.
*/
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
struct sysinfo si;
si_meminfo(&si);
return (ptob(si.freeram - si.freehigh));
#else
return (ptob(nr_free_pages() +
nr_inactive_file_pages()));
#endif /* CONFIG_HIGHMEM */
}
/*
* Return the amount of memory that can be consumed before reclaim will be
* needed. Positive if there is sufficient free memory, negative indicates
* the amount of memory that needs to be freed up.
*/
int64_t
arc_available_memory(void)
{
return (arc_free_memory() - arc_sys_free);
}
static uint64_t
arc_evictable_memory(void)
{
int64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t arc_clean =
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);
/*
* Scale reported evictable memory in proportion to page cache, cap
* at specified min/max.
*/
uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
min = MAX(arc_c_min, MIN(arc_c_max, min));
if (arc_dirty >= min)
return (arc_clean);
return (MAX((int64_t)asize - (int64_t)min, 0));
}
/*
* The _count() function returns the number of free-able objects.
* The _scan() function returns the number of objects that were freed.
*/
static unsigned long
arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
/*
* __GFP_FS won't be set if we are called from ZFS code (see
* kmem_flags_convert(), which removes it). To avoid a deadlock, we
* don't allow evicting in this case. We return 0 rather than
* SHRINK_STOP so that the shrinker logic doesn't accumulate a
* deficit against us.
*/
if (!(sc->gfp_mask & __GFP_FS)) {
return (0);
}
/*
* This code is reached in the "direct reclaim" case, where the
* kernel (outside ZFS) is trying to allocate a page, and the system
* is low on memory.
*
* The kernel's shrinker code doesn't understand how many pages the
* ARC's callback actually frees, so it may ask the ARC to shrink a
* lot for one page allocation. This is problematic because it may
* take a long time, thus delaying the page allocation, and because
* it may force the ARC to unnecessarily shrink very small.
*
* Therefore, we limit the amount of data that we say is evictable,
* which limits the amount that the shrinker will ask us to evict for
* one page allocation attempt.
*
* In practice, we may be asked to shrink 4x the limit to satisfy one
* page allocation, before the kernel's shrinker code gives up on us.
* When that happens, we rely on the kernel code to find the pages
* that we freed before invoking the OOM killer. This happens in
* __alloc_pages_slowpath(), which retries and finds the pages we
* freed when it calls get_page_from_freelist().
*
* See also the comment above zfs_arc_shrinker_limit.
*/
int64_t limit = zfs_arc_shrinker_limit != 0 ?
zfs_arc_shrinker_limit : INT64_MAX;
return (MIN(limit, btop((int64_t)arc_evictable_memory())));
}
static unsigned long
arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
ASSERT((sc->gfp_mask & __GFP_FS) != 0);
/* The arc is considered warm once reclaim has occurred */
if (unlikely(arc_warm == B_FALSE))
arc_warm = B_TRUE;
/*
* Evict the requested number of pages by reducing arc_c and waiting
* for the requested amount of data to be evicted.
*/
arc_reduce_target_size(ptob(sc->nr_to_scan));
arc_wait_for_eviction(ptob(sc->nr_to_scan), B_FALSE);
if (current->reclaim_state != NULL)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
current->reclaim_state->reclaimed += sc->nr_to_scan;
#else
current->reclaim_state->reclaimed_slab += sc->nr_to_scan;
#endif
/*
* We are experiencing memory pressure which the arc_evict_zthr was
* unable to keep up with. Set arc_no_grow to briefly pause arc
* growth to avoid compounding the memory pressure.
*/
arc_no_grow = B_TRUE;
/*
* When direct reclaim is observed it usually indicates a rapid
* increase in memory pressure. This occurs because the kswapd
* threads were unable to asynchronously keep enough free memory
* available.
*/
if (current_is_kswapd()) {
ARCSTAT_BUMP(arcstat_memory_indirect_count);
} else {
ARCSTAT_BUMP(arcstat_memory_direct_count);
}
return (sc->nr_to_scan);
}
-SPL_SHRINKER_DECLARE(arc_shrinker,
- arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);
+static struct shrinker *arc_shrinker = NULL;
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
uint64_t free_memory = arc_free_memory();
if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
return (0);
if (txg > spa->spa_lowmem_last_txg) {
spa->spa_lowmem_last_txg = txg;
spa->spa_lowmem_page_load = 0;
}
/*
* If we are in pageout, we know that memory is already tight,
* the arc is already going to be evicting, so we just want to
* continue to let page writes occur as quickly as possible.
*/
if (current_is_kswapd()) {
if (spa->spa_lowmem_page_load >
MAX(arc_sys_free / 4, free_memory) / 4) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(ERESTART));
}
/* Note: reserve is inflated, so we deflate */
atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
return (0);
} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
/* memory is low, delay before restarting */
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(EAGAIN));
}
spa->spa_lowmem_page_load = 0;
return (0);
}
static void
arc_set_sys_free(uint64_t allmem)
{
/*
* The ARC tries to keep at least this much memory available for the
* system. This gives the ARC time to shrink in response to memory
* pressure, before running completely out of memory and invoking the
* direct-reclaim ARC shrinker.
*
* This should be more than twice high_wmark_pages(), so that
* arc_wait_for_eviction() will wait until at least the
* high_wmark_pages() are free (see arc_evict_state_impl()).
*
* Note: Even when the system is very low on memory, the kernel's
* shrinker code may only ask for one "batch" of pages (512KB) to be
* evicted. If concurrent allocations consume these pages, there may
* still be insufficient free pages, and the OOM killer takes action.
*
* By setting arc_sys_free large enough, and having
* arc_wait_for_eviction() wait until there is at least arc_sys_free/2
* free memory, it is much less likely that concurrent allocations can
* consume all the memory that was evicted before checking for
* OOM.
*
* It's hard to iterate the zones from a linux kernel module, which
* makes it difficult to determine the watermark dynamically. Instead
* we compute the maximum high watermark for this system, based
* on the amount of memory, assuming default parameters on Linux kernel
* 5.3.
*/
/*
* Base wmark_low is 4 * the square root of Kbytes of RAM.
*/
long wmark = 4 * int_sqrt(allmem/1024) * 1024;
/*
* Clamp to between 128K and 64MB.
*/
wmark = MAX(wmark, 128 * 1024);
wmark = MIN(wmark, 64 * 1024 * 1024);
/*
* watermark_boost can increase the wmark by up to 150%.
*/
wmark += wmark * 150 / 100;
/*
* arc_sys_free needs to be more than 2x the watermark, because
* arc_wait_for_eviction() waits for half of arc_sys_free. Bump this up
* to 3x to ensure we're above it.
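 *
 * Worked example for a hypothetical 16GiB system: the base wmark is
 * 4 * sqrt(16GiB / 1KiB) * 1KiB = 16MiB, within the 128KiB-64MiB
 * clamp; the 150% boost raises it to 40MiB, so arc_sys_free becomes
 * 3 * 40MiB + 16GiB / 32 = 632MiB.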
*/
arc_sys_free = wmark * 3 + allmem / 32;
}
void
arc_lowmem_init(void)
{
uint64_t allmem = arc_all_memory();
/*
* Register a shrinker to support synchronous (direct) memory
* reclaim from the arc. This is done to prevent kswapd from
* swapping out pages when it is preferable to shrink the arc.
*/
- spl_register_shrinker(&arc_shrinker);
+ arc_shrinker = spl_register_shrinker("zfs-arc-shrinker",
+ arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);
+ VERIFY(arc_shrinker);
+
arc_set_sys_free(allmem);
}
void
arc_lowmem_fini(void)
{
- spl_unregister_shrinker(&arc_shrinker);
+ spl_unregister_shrinker(arc_shrinker);
+ arc_shrinker = NULL;
}
int
param_set_arc_u64(const char *buf, zfs_kernel_param_t *kp)
{
int error;
error = spl_param_set_u64(buf, kp);
if (error < 0)
return (SET_ERROR(error));
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_min(const char *buf, zfs_kernel_param_t *kp)
{
return (param_set_arc_u64(buf, kp));
}
int
param_set_arc_max(const char *buf, zfs_kernel_param_t *kp)
{
return (param_set_arc_u64(buf, kp));
}
int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
int error;
error = param_set_int(buf, kp);
if (error < 0)
return (SET_ERROR(error));
arc_tuning_update(B_TRUE);
return (0);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int
arc_hotplug_callback(struct notifier_block *self, unsigned long action,
void *arg)
{
(void) self, (void) arg;
uint64_t allmem = arc_all_memory();
if (action != MEM_ONLINE)
return (NOTIFY_OK);
arc_set_limits(allmem);
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
arc_set_sys_free(allmem);
return (NOTIFY_OK);
}
#endif
void
arc_register_hotplug(void)
{
#ifdef CONFIG_MEMORY_HOTPLUG
arc_hotplug_callback_mem_nb.notifier_call = arc_hotplug_callback;
/* There is no significance to the value 100 */
arc_hotplug_callback_mem_nb.priority = 100;
register_memory_notifier(&arc_hotplug_callback_mem_nb);
#endif
}
void
arc_unregister_hotplug(void)
{
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&arc_hotplug_callback_mem_nb);
#endif
}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
{
int64_t lowest = INT64_MAX;
/* Every 100 calls, free a small amount */
if (random_in_range(100) == 0)
lowest = -1024;
return (lowest);
}
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
(void) spa, (void) reserve, (void) txg;
return (0);
}
uint64_t
arc_all_memory(void)
{
return (ptob(physmem) / 2);
}
uint64_t
arc_free_memory(void)
{
return (random_in_range(arc_all_memory() * 20 / 100));
}
void
arc_register_hotplug(void)
{
}
void
arc_unregister_hotplug(void)
{
}
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, shrinker_limit, INT, ZMOD_RW,
"Limit on number of pages that ARC shrinker can reclaim at once");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index 48ac55f07034..b0bda5fa2012 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -1,1105 +1,1176 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif
+/*
+ * Linux 6.8.x uses a bdev_handle as an instance/refcount for an underlying
+ * block_device. Since it carries the block_device inside, it's convenient to
+ * just use the handle as a proxy. For pre-6.8, we just emulate this with
+ * a cast, since we don't need any of the other fields inside the handle.
+ */
+#ifdef HAVE_BDEV_OPEN_BY_PATH
+typedef struct bdev_handle zfs_bdev_handle_t;
+#define BDH_BDEV(bdh) ((bdh)->bdev)
+#define BDH_IS_ERR(bdh) (IS_ERR(bdh))
+#define BDH_PTR_ERR(bdh) (PTR_ERR(bdh))
+#define BDH_ERR_PTR(err) (ERR_PTR(err))
+#else
+typedef void zfs_bdev_handle_t;
+#define BDH_BDEV(bdh) ((struct block_device *)bdh)
+#define BDH_IS_ERR(bdh) (IS_ERR(BDH_BDEV(bdh)))
+#define BDH_PTR_ERR(bdh) (PTR_ERR(BDH_BDEV(bdh)))
+#define BDH_ERR_PTR(err) (ERR_PTR(err))
+#endif
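+
+/*
+ * For example (a sketch, not part of this change), the same
+ * open/inspect/close sequence works on either kernel generation:
+ *
+ *	zfs_bdev_handle_t *bdh =
+ *	    vdev_blkdev_get_by_path(path, mode, holder);
+ *	if (!BDH_IS_ERR(bdh)) {
+ *		uint64_t cap = bdev_capacity(BDH_BDEV(bdh));
+ *		vdev_blkdev_put(bdh, mode, holder);
+ *	}
+ */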
+
typedef struct vdev_disk {
- struct block_device *vd_bdev;
+ zfs_bdev_handle_t *vd_bdh;
krwlock_t vd_lock;
} vdev_disk_t;
/*
* Unique identifier for the exclusive vdev holder.
*/
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
* Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
* device is missing. The missing path may be transient since the links
* can be briefly removed and recreated in response to udev events.
*/
static uint_t zfs_vdev_open_timeout_ms = 1000;
/*
* Size of the "reserved" partition, in blocks.
*/
#define EFI_MIN_RESV_SIZE (16 * 1024)
/*
* Virtual device vector for disks.
*/
typedef struct dio_request {
zio_t *dr_zio; /* Parent ZIO */
atomic_t dr_ref; /* References */
int dr_error; /* Bio error */
int dr_bio_count; /* Count of bio's */
struct bio *dr_bio[]; /* Attached bio's */
} dio_request_t;
/*
* BIO request failfast mask.
*/
static unsigned int zfs_vdev_failfast_mask = 1;
#ifdef HAVE_BLK_MODE_T
static blk_mode_t
#else
static fmode_t
#endif
-vdev_bdev_mode(spa_mode_t spa_mode)
+vdev_bdev_mode(spa_mode_t spa_mode, boolean_t exclusive)
{
#ifdef HAVE_BLK_MODE_T
blk_mode_t mode = 0;
if (spa_mode & SPA_MODE_READ)
mode |= BLK_OPEN_READ;
if (spa_mode & SPA_MODE_WRITE)
mode |= BLK_OPEN_WRITE;
+
+ if (exclusive)
+ mode |= BLK_OPEN_EXCL;
#else
fmode_t mode = 0;
if (spa_mode & SPA_MODE_READ)
mode |= FMODE_READ;
if (spa_mode & SPA_MODE_WRITE)
mode |= FMODE_WRITE;
+
+ if (exclusive)
+ mode |= FMODE_EXCL;
#endif
return (mode);
}
/*
* Returns the usable capacity (in bytes) for the partition or disk.
*/
static uint64_t
bdev_capacity(struct block_device *bdev)
{
return (i_size_read(bdev->bd_inode));
}
#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
return (bdev->bd_contains);
}
#endif
#if defined(HAVE_BDEVNAME)
#define vdev_bdevname(bdev, name) bdevname(bdev, name)
#else
static inline void
vdev_bdevname(struct block_device *bdev, char *name)
{
snprintf(name, BDEVNAME_SIZE, "%pg", bdev);
}
#endif
/*
* Returns the maximum expansion capacity of the block device (in bytes).
*
* It is possible to expand a vdev when it has been created as a wholedisk
* and the containing block device has increased in capacity. Or when the
* partition containing the pool has been manually increased in size.
*
* This function is only responsible for calculating the potential expansion
* size so it can be reported by 'zpool list'. The efi_use_whole_disk() is
* responsible for verifying the expected partition layout in the wholedisk
* case, and updating the partition table if appropriate. Once the partition
* size has been increased the additional capacity will be visible using
* bdev_capacity().
*
 * The returned maximum expansion capacity is always expected to be larger
 * than, or at the very least equal to, the usable capacity to prevent
 * overestimating the pool expandsize.
*/
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
uint64_t psize;
int64_t available;
if (wholedisk && bdev != bdev_whole(bdev)) {
/*
* When reporting maximum expansion capacity for a wholedisk
* deduct any capacity which is expected to be lost due to
		 * alignment restrictions. Over-reporting this value isn't
* harmful and would only result in slightly less capacity
* than expected post expansion.
* The estimated available space may be slightly smaller than
* bdev_capacity() for devices where the number of sectors is
* not a multiple of the alignment size and the partition layout
* is keeping less than PARTITION_END_ALIGNMENT bytes after the
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
available = i_size_read(bdev_whole(bdev)->bd_inode) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
} else {
psize = bdev_capacity(bdev);
}
return (psize);
}
static void
vdev_disk_error(zio_t *zio)
{
/*
* This function can be called in interrupt context, for instance while
* handling IRQs coming from a misbehaving disk device; use printk()
* which is safe from any context.
*/
printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
"offset=%llu size=%llu flags=%llu\n", spa_name(zio->io_spa),
zio->io_vd->vdev_path, zio->io_error, zio->io_type,
(u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
zio->io_flags);
}
static void
vdev_disk_kobj_evt_post(vdev_t *v)
{
vdev_disk_t *vd = v->vdev_tsd;
- if (vd && vd->vd_bdev) {
- spl_signal_kobj_evt(vd->vd_bdev);
+ if (vd && vd->vd_bdh) {
+ spl_signal_kobj_evt(BDH_BDEV(vd->vd_bdh));
} else {
vdev_dbgmsg(v, "vdev_disk_t is NULL for VDEV:%s\n",
v->vdev_path);
}
}
-#if !defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
-/*
- * Define a dummy struct blk_holder_ops for kernel versions
- * prior to 6.5.
- */
-struct blk_holder_ops {};
-#endif
-
-static struct block_device *
-vdev_blkdev_get_by_path(const char *path, spa_mode_t mode, void *holder,
- const struct blk_holder_ops *hops)
+static zfs_bdev_handle_t *
+vdev_blkdev_get_by_path(const char *path, spa_mode_t mode, void *holder)
{
-#ifdef HAVE_BLKDEV_GET_BY_PATH_4ARG
+#if defined(HAVE_BDEV_OPEN_BY_PATH)
+ return (bdev_open_by_path(path,
+ vdev_bdev_mode(mode, B_TRUE), holder, NULL));
+#elif defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
return (blkdev_get_by_path(path,
- vdev_bdev_mode(mode) | BLK_OPEN_EXCL, holder, hops));
+ vdev_bdev_mode(mode, B_TRUE), holder, NULL));
#else
return (blkdev_get_by_path(path,
- vdev_bdev_mode(mode) | FMODE_EXCL, holder));
+ vdev_bdev_mode(mode, B_TRUE), holder));
#endif
}
static void
-vdev_blkdev_put(struct block_device *bdev, spa_mode_t mode, void *holder)
+vdev_blkdev_put(zfs_bdev_handle_t *bdh, spa_mode_t mode, void *holder)
{
-#ifdef HAVE_BLKDEV_PUT_HOLDER
- return (blkdev_put(bdev, holder));
+#if defined(HAVE_BDEV_RELEASE)
+ return (bdev_release(bdh));
+#elif defined(HAVE_BLKDEV_PUT_HOLDER)
+ return (blkdev_put(BDH_BDEV(bdh), holder));
#else
- return (blkdev_put(bdev, vdev_bdev_mode(mode) | FMODE_EXCL));
+ return (blkdev_put(BDH_BDEV(bdh),
+ vdev_bdev_mode(mode, B_TRUE)));
#endif
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
- struct block_device *bdev;
+ zfs_bdev_handle_t *bdh;
#ifdef HAVE_BLK_MODE_T
- blk_mode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
+ blk_mode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#else
- fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
+ fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#endif
hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
vdev_disk_t *vd;
/* Must have a pathname and it must be absolute. */
if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
vdev_dbgmsg(v, "invalid vdev_path");
return (SET_ERROR(EINVAL));
}
/*
 * Reopen the device if it is currently open. When expanding a
 * partition, force re-scanning the partition table if userland
 * did not take care of this already. We need to do this while closed
 * in order to get an accurate updated block device size. Then,
 * since udev may need to recreate the device links, increase the
 * open retry timeout before reporting the device as unavailable.
*/
vd = v->vdev_tsd;
if (vd) {
char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
boolean_t reread_part = B_FALSE;
rw_enter(&vd->vd_lock, RW_WRITER);
- bdev = vd->vd_bdev;
- vd->vd_bdev = NULL;
+ bdh = vd->vd_bdh;
+ vd->vd_bdh = NULL;
- if (bdev) {
+ if (bdh) {
+ struct block_device *bdev = BDH_BDEV(bdh);
if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
vdev_bdevname(bdev_whole(bdev), disk_name + 5);
/*
* If userland has BLKPG_RESIZE_PARTITION,
* then it should have updated the partition
* table already. We can detect this by
* comparing our current physical size
* with that of the device. If they are
* the same, then we must not have
* BLKPG_RESIZE_PARTITION or it failed to
* update the partition table online. We
* fallback to rescanning the partition
* table from the kernel below. However,
* if the capacity already reflects the
* updated partition, then we skip
* rescanning the partition table here.
*/
if (v->vdev_psize == bdev_capacity(bdev))
reread_part = B_TRUE;
}
- vdev_blkdev_put(bdev, mode, zfs_vdev_holder);
+ vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
}
if (reread_part) {
- bdev = vdev_blkdev_get_by_path(disk_name, mode,
- zfs_vdev_holder, NULL);
- if (!IS_ERR(bdev)) {
- int error = vdev_bdev_reread_part(bdev);
- vdev_blkdev_put(bdev, mode, zfs_vdev_holder);
+ bdh = vdev_blkdev_get_by_path(disk_name, mode,
+ zfs_vdev_holder);
+ if (!BDH_IS_ERR(bdh)) {
+ int error =
+ vdev_bdev_reread_part(BDH_BDEV(bdh));
+ vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
if (error == 0) {
timeout = MSEC2NSEC(
zfs_vdev_open_timeout_ms * 2);
}
}
}
} else {
vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
rw_enter(&vd->vd_lock, RW_WRITER);
}
/*
* Devices are always opened by the path provided at configuration
* time. This means that if the provided path is a udev by-id path
* then drives may be re-cabled without an issue. If the provided
* path is a udev by-path path, then the physical location information
* will be preserved. This can be critical for more complicated
* configurations where drives are located in specific physical
 * locations to maximize the system's tolerance to component failure.
*
* Alternatively, you can provide your own udev rule to flexibly map
* the drives as you see fit. It is not advised that you use the
* /dev/[hd]d devices which may be reordered due to probing order.
* Devices in the wrong locations will be detected by the higher
* level vdev validation.
*
* The specified paths may be briefly removed and recreated in
* response to udev events. This should be exceptionally unlikely
* because the zpool command makes every effort to verify these paths
* have already settled prior to reaching this point. Therefore,
 * an ENOENT failure at this point is highly likely to be transient
* and it is reasonable to sleep and retry before giving up. In
* practice delays have been observed to be on the order of 100ms.
*
* When ERESTARTSYS is returned it indicates the block device is
* a zvol which could not be opened due to the deadlock detection
 * logic in zvol_open(). Extend the timeout and retry the open;
 * subsequent attempts are expected to eventually succeed.
*/
hrtime_t start = gethrtime();
- bdev = ERR_PTR(-ENXIO);
- while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
- bdev = vdev_blkdev_get_by_path(v->vdev_path, mode,
- zfs_vdev_holder, NULL);
- if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
+ bdh = BDH_ERR_PTR(-ENXIO);
+ while (BDH_IS_ERR(bdh) && ((gethrtime() - start) < timeout)) {
+ bdh = vdev_blkdev_get_by_path(v->vdev_path, mode,
+ zfs_vdev_holder);
+ if (unlikely(BDH_PTR_ERR(bdh) == -ENOENT)) {
/*
 * There is no point in waiting since the device was removed
 * explicitly.
*/
if (v->vdev_removed)
break;
schedule_timeout(MSEC_TO_TICK(10));
- } else if (unlikely(PTR_ERR(bdev) == -ERESTARTSYS)) {
+ } else if (unlikely(BDH_PTR_ERR(bdh) == -ERESTARTSYS)) {
timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
continue;
- } else if (IS_ERR(bdev)) {
+ } else if (BDH_IS_ERR(bdh)) {
break;
}
}
- if (IS_ERR(bdev)) {
- int error = -PTR_ERR(bdev);
+ if (BDH_IS_ERR(bdh)) {
+ int error = -BDH_PTR_ERR(bdh);
vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
(u_longlong_t)(gethrtime() - start),
(u_longlong_t)timeout);
- vd->vd_bdev = NULL;
+ vd->vd_bdh = NULL;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
return (SET_ERROR(error));
} else {
- vd->vd_bdev = bdev;
+ vd->vd_bdh = bdh;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
}
+ struct block_device *bdev = BDH_BDEV(vd->vd_bdh);
+
/* Determine the physical block size */
- int physical_block_size = bdev_physical_block_size(vd->vd_bdev);
+ int physical_block_size = bdev_physical_block_size(bdev);
/* Determine the logical block size */
- int logical_block_size = bdev_logical_block_size(vd->vd_bdev);
+ int logical_block_size = bdev_logical_block_size(bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
/* Set when device reports it supports TRIM. */
- v->vdev_has_trim = bdev_discard_supported(vd->vd_bdev);
+ v->vdev_has_trim = bdev_discard_supported(bdev);
/* Set when device reports it supports secure TRIM. */
- v->vdev_has_securetrim = bdev_secure_discard_supported(vd->vd_bdev);
+ v->vdev_has_securetrim = bdev_secure_discard_supported(bdev);
/* Inform the ZIO pipeline that we are non-rotational */
- v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));
+ v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(bdev));
/* Physical volume size in bytes for the partition */
- *psize = bdev_capacity(vd->vd_bdev);
+ *psize = bdev_capacity(bdev);
/* Physical volume size in bytes including possible expansion space */
- *max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);
+ *max_psize = bdev_max_capacity(bdev, v->vdev_wholedisk);
/* Based on the minimum sector size set the block size */
*physical_ashift = highbit64(MAX(physical_block_size,
SPA_MINBLOCKSIZE)) - 1;
*logical_ashift = highbit64(MAX(logical_block_size,
SPA_MINBLOCKSIZE)) - 1;
return (0);
}
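
The ashift values computed at the end of vdev_disk_open() are simply base-2
logarithms of the reported block sizes. A minimal sketch, assuming
highbit64() returns the 1-based index of the highest set bit:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: 1-based index of the highest set bit, 0 for 0. */
static int
highbit64(uint64_t x)
{
	int h = 0;

	while (x != 0) {
		h++;
		x >>= 1;
	}
	return (h);
}

int
main(void)
{
	/* 2^9 = 512, 2^12 = 4096 */
	printf("logical ashift = %d\n", highbit64(512) - 1);
	printf("physical ashift = %d\n", highbit64(4096) - 1);
	return (0);
}

A device reporting 512-byte logical and 4096-byte physical blocks therefore
yields a logical ashift of 9 and a physical ashift of 12.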
static void
vdev_disk_close(vdev_t *v)
{
vdev_disk_t *vd = v->vdev_tsd;
if (v->vdev_reopening || vd == NULL)
return;
- if (vd->vd_bdev != NULL) {
- vdev_blkdev_put(vd->vd_bdev, spa_mode(v->vdev_spa),
+ if (vd->vd_bdh != NULL) {
+ vdev_blkdev_put(vd->vd_bdh, spa_mode(v->vdev_spa),
zfs_vdev_holder);
}
rw_destroy(&vd->vd_lock);
kmem_free(vd, sizeof (vdev_disk_t));
v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
sizeof (struct bio *) * bio_count, KM_SLEEP);
atomic_set(&dr->dr_ref, 0);
dr->dr_bio_count = bio_count;
dr->dr_error = 0;
for (int i = 0; i < dr->dr_bio_count; i++)
dr->dr_bio[i] = NULL;
return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
int i;
for (i = 0; i < dr->dr_bio_count; i++)
if (dr->dr_bio[i])
bio_put(dr->dr_bio[i]);
kmem_free(dr, sizeof (dio_request_t) +
sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
atomic_inc(&dr->dr_ref);
}
static void
vdev_disk_dio_put(dio_request_t *dr)
{
int rc = atomic_dec_return(&dr->dr_ref);
/*
* Free the dio_request when the last reference is dropped and
* ensure zio_interpret is called only once with the correct zio
*/
if (rc == 0) {
zio_t *zio = dr->dr_zio;
int error = dr->dr_error;
vdev_disk_dio_free(dr);
if (zio) {
zio->io_error = error;
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_delay_interrupt(zio);
}
}
}
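
The get/put pairs above implement a completion counter: one reference is
taken per submitted bio plus a guard reference held by the submitter, and
the zio is completed by whichever side drops the count to zero. A minimal
userspace sketch of the same pattern, illustrative only and not the kernel
implementation:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dr_ref;

/* Fire the final action only when the count returns to zero. */
static void
dio_put(void)
{
	if (atomic_fetch_sub(&dr_ref, 1) == 1)
		printf("last reference dropped: complete the zio\n");
}

int
main(void)
{
	int nbio = 3;

	for (int i = 0; i < nbio; i++)
		atomic_fetch_add(&dr_ref, 1);	/* one ref per bio */
	atomic_fetch_add(&dr_ref, 1);		/* submitter's guard ref */

	for (int i = 0; i < nbio; i++)
		dio_put();			/* each bio completion */
	dio_put();				/* submitter drops its guard */
	return (0);
}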
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
dio_request_t *dr = bio->bi_private;
if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
dr->dr_error = BIO_END_IO_ERROR(bio);
#else
if (error)
dr->dr_error = -(error);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
dr->dr_error = EIO;
#endif
}
/* Drop reference acquired by __vdev_disk_physio */
vdev_disk_dio_put(dr);
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
(void) submit_bio(bio);
#else
(void) submit_bio(bio_data_dir(bio), bio);
#endif
}
/*
* preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
* replace it with preempt_schedule under the following condition:
*/
#if defined(CONFIG_ARM64) && \
defined(CONFIG_PREEMPTION) && \
defined(CONFIG_BLK_CGROUP)
#define preempt_schedule_notrace(x) preempt_schedule(x)
#endif
/*
 * As of the Linux 5.18 kernel, bio_alloc() expects a block_device struct
 * as an argument, removing the need to set it with bio_set_dev(). This
* removes the need for all of the following compatibility code.
*/
#if !defined(HAVE_BIO_ALLOC_4ARG)
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
* The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
* blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
* As a side effect the function was converted to GPL-only. Define our
* own version when needed which uses rcu_read_lock_sched().
*
* The Linux 5.17 kernel split linux/blk-cgroup.h into a private and a public
* part, moving blkg_tryget into the private one. Define our own version.
*/
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY) || !defined(HAVE_BLKG_TRYGET)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
struct percpu_ref *ref = &blkg->refcnt;
unsigned long __percpu *count;
bool rc;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &count)) {
this_cpu_inc(*count);
rc = true;
} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
rc = atomic_long_inc_not_zero(&ref->data->count);
#else
rc = atomic_long_inc_not_zero(&ref->count);
#endif
}
rcu_read_unlock_sched();
return (rc);
}
#else
#define vdev_blkg_tryget(bg) blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
* The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
* GPL-only bio_associate_blkg() symbol thus inadvertently converting
* the entire macro. Provide a minimal version which always assigns the
* request queue's root_blkg to the bio.
*/
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
ASSERT3P(q, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_associate_blkg vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
bio_clear_flag(bio, BIO_REMAPPED);
if (bio->bi_bdev != bdev)
bio_clear_flag(bio, BIO_THROTTLED);
bio->bi_bdev = bdev;
ASSERT3P(q, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_set_dev vdev_bio_set_dev
#endif
#endif
#else
/*
* Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
*/
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
#endif /* !HAVE_BIO_ALLOC_4ARG */
static inline void
vdev_submit_bio(struct bio *bio)
{
struct bio_list *bio_list = current->bio_list;
current->bio_list = NULL;
vdev_submit_bio_impl(bio);
current->bio_list = bio_list;
}
static inline struct bio *
vdev_bio_alloc(struct block_device *bdev, gfp_t gfp_mask,
unsigned short nr_vecs)
{
struct bio *bio;
#ifdef HAVE_BIO_ALLOC_4ARG
bio = bio_alloc(bdev, nr_vecs, 0, gfp_mask);
#else
bio = bio_alloc(gfp_mask, nr_vecs);
if (likely(bio != NULL))
bio_set_dev(bio, bdev);
#endif
return (bio);
}
static inline unsigned int
vdev_bio_max_segs(zio_t *zio, int bio_size, uint64_t abd_offset)
{
unsigned long nr_segs = abd_nr_pages_off(zio->io_abd,
bio_size, abd_offset);
#ifdef HAVE_BIO_MAX_SEGS
return (bio_max_segs(nr_segs));
#else
return (MIN(nr_segs, BIO_MAX_PAGES));
#endif
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
size_t io_size, uint64_t io_offset, int rw, int flags)
{
dio_request_t *dr;
uint64_t abd_offset;
uint64_t bio_offset;
int bio_size;
int bio_count = 16;
int error = 0;
struct blk_plug plug;
unsigned short nr_vecs;
/*
* Accessing outside the block device is never allowed.
*/
if (io_offset + io_size > bdev->bd_inode->i_size) {
vdev_dbgmsg(zio->io_vd,
"Illegal access %llu size %llu, device size %llu",
(u_longlong_t)io_offset,
(u_longlong_t)io_size,
(u_longlong_t)i_size_read(bdev->bd_inode));
return (SET_ERROR(EIO));
}
retry:
dr = vdev_disk_dio_alloc(bio_count);
if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) &&
zio->io_vd->vdev_failfast == B_TRUE) {
bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1,
zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4);
}
dr->dr_zio = zio;
/*
 * Since bios can have up to BIO_MAX_PAGES=256 iovecs, each of which
 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
 * can cover at least 128KB and at most 1MB. When the required number
 * of iovecs exceeds this, we are forced to split the IO into multiple
 * bios and wait for them all to complete. This is likely if the
* recordsize property is increased beyond 1MB. The default
* bio_count=16 should typically accommodate the maximum-size zio of
* 16MB.
*/
abd_offset = 0;
bio_offset = io_offset;
bio_size = io_size;
for (int i = 0; i <= dr->dr_bio_count; i++) {
/* Finished constructing bio's for given buffer */
if (bio_size <= 0)
break;
/*
* If additional bio's are required, we have to retry, but
* this should be rare - see the comment above.
*/
if (dr->dr_bio_count == i) {
vdev_disk_dio_free(dr);
bio_count *= 2;
goto retry;
}
nr_vecs = vdev_bio_max_segs(zio, bio_size, abd_offset);
dr->dr_bio[i] = vdev_bio_alloc(bdev, GFP_NOIO, nr_vecs);
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (SET_ERROR(ENOMEM));
}
/* Matching put called by vdev_disk_physio_completion */
vdev_disk_dio_get(dr);
BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
dr->dr_bio[i]->bi_private = dr;
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
/* Remaining size is returned to become the new size */
bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
}
/* Extra reference to protect dio_request during vdev_submit_bio */
vdev_disk_dio_get(dr);
if (dr->dr_bio_count > 1)
blk_start_plug(&plug);
/* Submit all bio's associated with this dio */
for (int i = 0; i < dr->dr_bio_count; i++) {
if (dr->dr_bio[i])
vdev_submit_bio(dr->dr_bio[i]);
}
if (dr->dr_bio_count > 1)
blk_finish_plug(&plug);
vdev_disk_dio_put(dr);
return (error);
}
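
As a quick check of the sizing comment in __vdev_disk_physio(): with
BIO_MAX_PAGES=256 vectors per bio and a typical 4 KiB page, one bio moves at
most 1 MiB, so the default bio_count of 16 covers a 16 MiB zio without a
retry. A sketch with those assumed values:

#include <stdio.h>

int
main(void)
{
	unsigned long long vecs = 256;		/* BIO_MAX_PAGES */
	unsigned long long page = 4096;		/* typical PAGESIZE */
	unsigned long long per_bio = vecs * page;
	unsigned long long io = 16ULL << 20;	/* a 16 MiB zio */

	printf("per-bio max = %llu KiB\n", per_bio >> 10);
	printf("bios needed = %llu\n", (io + per_bio - 1) / per_bio);
	return (0);
}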
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
zio->io_error = BIO_END_IO_ERROR(bio);
#else
zio->io_error = -error;
#endif
if (zio->io_error && (zio->io_error == EOPNOTSUPP))
zio->io_vd->vdev_nowritecache = B_TRUE;
bio_put(bio);
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
struct request_queue *q;
struct bio *bio;
q = bdev_get_queue(bdev);
if (!q)
return (SET_ERROR(ENXIO));
bio = vdev_bio_alloc(bdev, GFP_NOIO, 0);
if (unlikely(bio == NULL))
return (SET_ERROR(ENOMEM));
bio->bi_end_io = vdev_disk_io_flush_completion;
bio->bi_private = zio;
bio_set_flush(bio);
vdev_submit_bio(bio);
invalidate_bdev(bdev);
return (0);
}
+#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
+ defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
+BIO_END_IO_PROTO(vdev_disk_discard_end_io, bio, error)
+{
+ zio_t *zio = bio->bi_private;
+#ifdef HAVE_1ARG_BIO_END_IO_T
+ zio->io_error = BIO_END_IO_ERROR(bio);
+#else
+ zio->io_error = -error;
+#endif
+ bio_put(bio);
+ if (zio->io_error)
+ vdev_disk_error(zio);
+ zio_interrupt(zio);
+}
+
static int
-vdev_disk_io_trim(zio_t *zio)
+vdev_issue_discard_trim(zio_t *zio, unsigned long flags)
{
- vdev_t *v = zio->io_vd;
- vdev_disk_t *vd = v->vdev_tsd;
+ int ret;
+ struct bio *bio = NULL;
-#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
- if (zio->io_trim_flags & ZIO_TRIM_SECURE) {
- return (-blkdev_issue_secure_erase(vd->vd_bdev,
- zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS));
- } else {
- return (-blkdev_issue_discard(vd->vd_bdev,
- zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS));
+#if defined(BLKDEV_DISCARD_SECURE)
+ ret = - __blkdev_issue_discard(
+ BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
+ zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, flags, &bio);
+#else
+ (void) flags;
+ ret = - __blkdev_issue_discard(
+ BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
+ zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, &bio);
+#endif
+ if (!ret && bio) {
+ bio->bi_private = zio;
+ bio->bi_end_io = vdev_disk_discard_end_io;
+ vdev_submit_bio(bio);
}
-#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
+ return (ret);
+}
+#endif
+
+static int
+vdev_disk_io_trim(zio_t *zio)
+{
unsigned long trim_flags = 0;
-#if defined(BLKDEV_DISCARD_SECURE)
- if (zio->io_trim_flags & ZIO_TRIM_SECURE)
+ if (zio->io_trim_flags & ZIO_TRIM_SECURE) {
+#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
+ return (-blkdev_issue_secure_erase(
+ BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
+ zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS));
+#elif defined(BLKDEV_DISCARD_SECURE)
trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
- return (-blkdev_issue_discard(vd->vd_bdev,
+ }
+#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
+ defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
+ return (vdev_issue_discard_trim(zio, trim_flags));
+#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
+ return (-blkdev_issue_discard(
+ BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, trim_flags));
#else
#error "Unsupported kernel"
#endif
}
static void
vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
int rw, error;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vd == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
rw_enter(&vd->vd_lock, RW_READER);
/*
* If the vdev is closed, it's likely due to a failed reopen and is
* in the UNAVAIL state. Nothing to be done here but return failure.
*/
- if (vd->vd_bdev == NULL) {
+ if (vd->vd_bdh == NULL) {
rw_exit(&vd->vd_lock);
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
if (!vdev_readable(v)) {
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
if (v->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
- error = vdev_disk_io_flush(vd->vd_bdev, zio);
+ error = vdev_disk_io_flush(BDH_BDEV(vd->vd_bdh), zio);
if (error == 0) {
rw_exit(&vd->vd_lock);
return;
}
zio->io_error = error;
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
rw_exit(&vd->vd_lock);
zio_execute(zio);
return;
case ZIO_TYPE_WRITE:
rw = WRITE;
break;
case ZIO_TYPE_READ:
rw = READ;
break;
case ZIO_TYPE_TRIM:
zio->io_error = vdev_disk_io_trim(zio);
rw_exit(&vd->vd_lock);
+#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
+ if (zio->io_trim_flags & ZIO_TRIM_SECURE)
+ zio_interrupt(zio);
+#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
zio_interrupt(zio);
+#endif
return;
default:
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENOTSUP);
zio_interrupt(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
- error = __vdev_disk_physio(vd->vd_bdev, zio,
+ error = __vdev_disk_physio(BDH_BDEV(vd->vd_bdh), zio,
zio->io_size, zio->io_offset, rw, 0);
rw_exit(&vd->vd_lock);
if (error) {
zio->io_error = error;
zio_interrupt(zio);
return;
}
}
static void
vdev_disk_io_done(zio_t *zio)
{
/*
* If the device returned EIO, we revalidate the media. If it is
 * determined the media has changed, this triggers the asynchronous
* removal of the device from the configuration.
*/
if (zio->io_error == EIO) {
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
- if (!zfs_check_disk_status(vd->vd_bdev)) {
- invalidate_bdev(vd->vd_bdev);
+ if (!zfs_check_disk_status(BDH_BDEV(vd->vd_bdh))) {
+ invalidate_bdev(BDH_BDEV(vd->vd_bdh));
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
}
}
}
static void
vdev_disk_hold(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* We must have a pathname, and it must be absolute. */
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
return;
/*
* Only prefetch path and devid info if the device has
* never been opened.
*/
if (vd->vdev_tsd != NULL)
return;
}
static void
vdev_disk_rele(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_disk_open,
.vdev_op_close = vdev_disk_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_disk_io_start,
.vdev_op_io_done = vdev_disk_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_disk_hold,
.vdev_op_rele = vdev_disk_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE, /* leaf vdev */
.vdev_op_kobj_evt_post = vdev_disk_kobj_evt_post
};
/*
* The zfs_vdev_scheduler module option has been deprecated. Setting this
* value no longer has any effect. It has not yet been entirely removed
* to allow the module to be loaded if this option is specified in the
* /etc/modprobe.d/zfs.conf file. The following warning will be logged.
*/
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
int error = param_set_charp(val, kp);
if (error == 0) {
printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
"is not supported.\n");
}
return (error);
}
static const char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint_t val;
int error;
error = kstrtouint(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_uint(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint_t val;
int error;
error = kstrtouint(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_uint(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, open_timeout_ms, UINT, ZMOD_RW,
"Timeout before determining that a device is missing");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, failfast_mask, UINT, ZMOD_RW,
"Defines failfast mask: 1 - device, 2 - transport, 4 - driver");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index 94e25fa0ae8f..54ed70d0394f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -1,1317 +1,1317 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* LLNL-CODE-403049.
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2018 George Melikov. All Rights Reserved.
* Copyright (c) 2019 Datto, Inc. All rights reserved.
* Copyright (c) 2020 The MathWorks, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, this is only the 'snapshot' and 'shares' directory, but this may
* expand in the future. The elements are built dynamically, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to lookup '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding inode.
*
 * All mounts are handled automatically by a user mode helper which invokes
* the mount procedure. Unmounts are handled by allowing the mount
* point to expire so the kernel may automatically unmount it.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') all share the same
* zfsvfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the '.zfs/snapshot/<snapname>' paths
* (ie: snapshots) are complete ZFS filesystems and have their own unique
* zfsvfs_t. However, the fsid reported by these mounts will be the same
* as that used by the parent zfsvfs_t to make NFS happy.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/zpl.h>
#include <sys/mntent.h>
#include "zfs_namecheck.h"
/*
* Two AVL trees are maintained which contain all currently automounted
 * snapshots. Every automounted snapshot maps to a single zfs_snapentry_t
* entry which MUST:
*
* - be attached to both trees, and
* - be unique, no duplicate entries are allowed.
*
* The zfs_snapshots_by_name tree is indexed by the full dataset name
* while the zfs_snapshots_by_objsetid tree is indexed by the unique
* objsetid. This allows for fast lookups either by name or objsetid.
*/
static avl_tree_t zfs_snapshots_by_name;
static avl_tree_t zfs_snapshots_by_objsetid;
static krwlock_t zfs_snapshot_lock;
/*
* Control Directory Tunables (.zfs)
*/
int zfs_expire_snapshot = ZFSCTL_EXPIRE_SNAPSHOT;
static int zfs_admin_snapshot = 0;
typedef struct {
char *se_name; /* full snapshot name */
char *se_path; /* full mount path */
spa_t *se_spa; /* pool spa */
uint64_t se_objsetid; /* snapshot objset id */
struct dentry *se_root_dentry; /* snapshot root dentry */
krwlock_t se_taskqid_lock; /* scheduled unmount taskqid lock */
taskqid_t se_taskqid; /* scheduled unmount taskqid */
avl_node_t se_node_name; /* zfs_snapshots_by_name link */
avl_node_t se_node_objsetid; /* zfs_snapshots_by_objsetid link */
zfs_refcount_t se_refcount; /* reference count */
} zfs_snapentry_t;
static void zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay);
/*
 * Allocate a new zfs_snapentry_t, being careful to make a copy of the
 * snapshot name and provided mount point. No reference is taken.
*/
static zfs_snapentry_t *
zfsctl_snapshot_alloc(const char *full_name, const char *full_path, spa_t *spa,
uint64_t objsetid, struct dentry *root_dentry)
{
zfs_snapentry_t *se;
se = kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP);
se->se_name = kmem_strdup(full_name);
se->se_path = kmem_strdup(full_path);
se->se_spa = spa;
se->se_objsetid = objsetid;
se->se_root_dentry = root_dentry;
se->se_taskqid = TASKQID_INVALID;
rw_init(&se->se_taskqid_lock, NULL, RW_DEFAULT, NULL);
zfs_refcount_create(&se->se_refcount);
return (se);
}
/*
 * Free a zfs_snapentry_t; the caller must ensure there are no active
* references.
*/
static void
zfsctl_snapshot_free(zfs_snapentry_t *se)
{
zfs_refcount_destroy(&se->se_refcount);
kmem_strfree(se->se_name);
kmem_strfree(se->se_path);
rw_destroy(&se->se_taskqid_lock);
kmem_free(se, sizeof (zfs_snapentry_t));
}
/*
* Hold a reference on the zfs_snapentry_t.
*/
static void
zfsctl_snapshot_hold(zfs_snapentry_t *se)
{
zfs_refcount_add(&se->se_refcount, NULL);
}
/*
* Release a reference on the zfs_snapentry_t. When the number of
* references drops to zero the structure will be freed.
*/
static void
zfsctl_snapshot_rele(zfs_snapentry_t *se)
{
if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
zfsctl_snapshot_free(se);
}
/*
* Add a zfs_snapentry_t to both the zfs_snapshots_by_name and
* zfs_snapshots_by_objsetid trees. While the zfs_snapentry_t is part
* of the trees a reference is held.
*/
static void
zfsctl_snapshot_add(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
zfsctl_snapshot_hold(se);
avl_add(&zfs_snapshots_by_name, se);
avl_add(&zfs_snapshots_by_objsetid, se);
}
/*
* Remove a zfs_snapentry_t from both the zfs_snapshots_by_name and
 * zfs_snapshots_by_objsetid trees. Upon removal a reference is dropped;
* this can result in the structure being freed if that was the last
* remaining reference.
*/
static void
zfsctl_snapshot_remove(zfs_snapentry_t *se)
{
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
avl_remove(&zfs_snapshots_by_name, se);
avl_remove(&zfs_snapshots_by_objsetid, se);
zfsctl_snapshot_rele(se);
}
/*
* Snapshot name comparison function for the zfs_snapshots_by_name.
*/
static int
snapentry_compare_by_name(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
int ret;
ret = strcmp(se_a->se_name, se_b->se_name);
if (ret < 0)
return (-1);
else if (ret > 0)
return (1);
else
return (0);
}
/*
* Snapshot name comparison function for the zfs_snapshots_by_objsetid.
*/
static int
snapentry_compare_by_objsetid(const void *a, const void *b)
{
const zfs_snapentry_t *se_a = a;
const zfs_snapentry_t *se_b = b;
if (se_a->se_spa != se_b->se_spa)
return ((ulong_t)se_a->se_spa < (ulong_t)se_b->se_spa ? -1 : 1);
if (se_a->se_objsetid < se_b->se_objsetid)
return (-1);
else if (se_a->se_objsetid > se_b->se_objsetid)
return (1);
else
return (0);
}
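
Both comparators normalize their result to exactly {-1, 0, 1}, which the
AVL implementation is assumed to require; returning raw strcmp() values
would violate that contract. A tiny sketch of the same normalization:

#include <stdio.h>
#include <string.h>

static int
avl_norm(int cmp)
{
	/* Clamp an arbitrary comparison result to {-1, 0, 1}. */
	return ((cmp > 0) - (cmp < 0));
}

int
main(void)
{
	printf("%d\n", avl_norm(strcmp("pool/fs@a", "pool/fs@b")));	/* -1 */
	printf("%d\n", avl_norm(strcmp("pool/fs@b", "pool/fs@a")));	/*  1 */
	printf("%d\n", avl_norm(strcmp("pool/fs@a", "pool/fs@a")));	/*  0 */
	return (0);
}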
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_name. If the snapname
 * is found, a pointer to the zfs_snapentry_t is returned and a reference
 * is taken on the structure. The caller is responsible for dropping the
* reference with zfsctl_snapshot_rele(). If the snapname is not found
* NULL will be returned.
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_name(const char *snapname)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_name = (char *)snapname;
se = avl_find(&zfs_snapshots_by_name, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Find a zfs_snapentry_t in zfs_snapshots_by_objsetid given the objset id
* rather than the snapname. In all other respects it behaves the same
* as zfsctl_snapshot_find_by_name().
*/
static zfs_snapentry_t *
zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
{
zfs_snapentry_t *se, search;
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
search.se_spa = spa;
search.se_objsetid = objsetid;
se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
if (se)
zfsctl_snapshot_hold(se);
return (se);
}
/*
* Rename a zfs_snapentry_t in the zfs_snapshots_by_name. The structure is
* removed, renamed, and added back to the new correct location in the tree.
*/
static int
zfsctl_snapshot_rename(const char *old_snapname, const char *new_snapname)
{
zfs_snapentry_t *se;
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
se = zfsctl_snapshot_find_by_name(old_snapname);
if (se == NULL)
return (SET_ERROR(ENOENT));
zfsctl_snapshot_remove(se);
kmem_strfree(se->se_name);
se->se_name = kmem_strdup(new_snapname);
zfsctl_snapshot_add(se);
zfsctl_snapshot_rele(se);
return (0);
}
/*
* Delayed task responsible for unmounting an expired automounted snapshot.
*/
static void
snapentry_expire(void *data)
{
zfs_snapentry_t *se = (zfs_snapentry_t *)data;
spa_t *spa = se->se_spa;
uint64_t objsetid = se->se_objsetid;
if (zfs_expire_snapshot <= 0) {
zfsctl_snapshot_rele(se);
return;
}
rw_enter(&se->se_taskqid_lock, RW_WRITER);
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
(void) zfsctl_snapshot_unmount(se->se_name, MNT_EXPIRE);
zfsctl_snapshot_rele(se);
/*
* Reschedule the unmount if the zfs_snapentry_t wasn't removed.
* This can occur when the snapshot is busy.
*/
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
zfsctl_snapshot_rele(se);
}
rw_exit(&zfs_snapshot_lock);
}
/*
* Cancel an automatic unmount of a snapname. This callback is responsible
 * for dropping the reference on the zfs_snapentry_t which was taken
 * during dispatch.
*/
static void
zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
{
int err = 0;
rw_enter(&se->se_taskqid_lock, RW_WRITER);
err = taskq_cancel_id(system_delay_taskq, se->se_taskqid);
/*
* if we get ENOENT, the taskq couldn't be found to be
* canceled, so we can just mark it as invalid because
* it's already gone. If we got EBUSY, then we already
* blocked until it was gone _anyway_, so we don't care.
*/
se->se_taskqid = TASKQID_INVALID;
rw_exit(&se->se_taskqid_lock);
if (err == 0) {
zfsctl_snapshot_rele(se);
}
}
/*
* Dispatch the unmount task for delayed handling with a hold protecting it.
*/
static void
zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay)
{
if (delay <= 0)
return;
zfsctl_snapshot_hold(se);
rw_enter(&se->se_taskqid_lock, RW_WRITER);
/*
* If this condition happens, we managed to:
* - dispatch once
* - want to dispatch _again_ before it returned
*
* So let's just return - if that task fails at unmounting,
* we'll eventually dispatch again, and if it succeeds,
* no problem.
*/
if (se->se_taskqid != TASKQID_INVALID) {
rw_exit(&se->se_taskqid_lock);
zfsctl_snapshot_rele(se);
return;
}
se->se_taskqid = taskq_dispatch_delay(system_delay_taskq,
snapentry_expire, se, TQ_SLEEP, ddi_get_lbolt() + delay * HZ);
rw_exit(&se->se_taskqid_lock);
}
/*
* Schedule an automatic unmount of objset id to occur in delay seconds from
* now. Any previous delayed unmount will be cancelled in favor of the
* updated deadline. A reference is taken by zfsctl_snapshot_find_by_name()
* and held until the outstanding task is handled or cancelled.
*/
int
zfsctl_snapshot_unmount_delay(spa_t *spa, uint64_t objsetid, int delay)
{
zfs_snapentry_t *se;
int error = ENOENT;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_unmount_delay_impl(se, delay);
zfsctl_snapshot_rele(se);
error = 0;
}
rw_exit(&zfs_snapshot_lock);
return (error);
}
/*
 * Check if snapname is currently mounted. Returns non-zero when mounted
* and zero when unmounted.
*/
static boolean_t
zfsctl_snapshot_ismounted(const char *snapname)
{
zfs_snapentry_t *se;
boolean_t ismounted = B_FALSE;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) != NULL) {
zfsctl_snapshot_rele(se);
ismounted = B_TRUE;
}
rw_exit(&zfs_snapshot_lock);
return (ismounted);
}
/*
* Check if the given inode is a part of the virtual .zfs directory.
*/
boolean_t
zfsctl_is_node(struct inode *ip)
{
return (ITOZ(ip)->z_is_ctldir);
}
/*
 * Check if the given inode is a .zfs/snapshot/<snapname> directory.
*/
boolean_t
zfsctl_is_snapdir(struct inode *ip)
{
return (zfsctl_is_node(ip) && (ip->i_ino <= ZFSCTL_INO_SNAPDIRS));
}
/*
* Allocate a new inode with the passed id and ops.
*/
static struct inode *
zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops,
uint64_t creation)
{
struct inode *ip;
znode_t *zp;
inode_timespec_t now = {.tv_sec = creation};
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
if (!creation)
now = current_time(ip);
zp = ITOZ(ip);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_id = id;
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_zn_prefetch = B_FALSE;
zp->z_is_sa = B_FALSE;
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
zp->z_is_mapped = B_FALSE;
#endif
zp->z_is_ctldir = B_TRUE;
zp->z_sa_hdl = NULL;
zp->z_blksz = 0;
zp->z_seq = 0;
zp->z_mapcnt = 0;
zp->z_size = 0;
zp->z_pflags = 0;
zp->z_mode = 0;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
ip->i_generation = 0;
ip->i_ino = id;
ip->i_mode = (S_IFDIR | S_IRWXUGO);
ip->i_uid = SUID_TO_KUID(0);
ip->i_gid = SGID_TO_KGID(0);
ip->i_blkbits = SPA_MINBLOCKSHIFT;
- ip->i_atime = now;
- ip->i_mtime = now;
+ zpl_inode_set_atime_to_ts(ip, now);
+ zpl_inode_set_mtime_to_ts(ip, now);
zpl_inode_set_ctime_to_ts(ip, now);
ip->i_fop = fops;
ip->i_op = ops;
#if defined(IOP_XATTR)
ip->i_opflags &= ~IOP_XATTR;
#endif
if (insert_inode_locked(ip)) {
unlock_new_inode(ip);
iput(ip);
return (NULL);
}
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
membar_producer();
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
return (ip);
}
/*
 * Lookup the inode with the given id; it will be allocated if needed.
*/
static struct inode *
zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
struct inode *ip = NULL;
uint64_t creation = 0;
dsl_dataset_t *snap_ds;
dsl_pool_t *pool;
while (ip == NULL) {
ip = ilookup(zfsvfs->z_sb, (unsigned long)id);
if (ip)
break;
if (id <= ZFSCTL_INO_SNAPDIRS && !creation) {
pool = dmu_objset_pool(zfsvfs->z_os);
dsl_pool_config_enter(pool, FTAG);
if (!dsl_dataset_hold_obj(pool,
ZFSCTL_INO_SNAPDIRS - id, FTAG, &snap_ds)) {
creation = dsl_get_creation(snap_ds);
dsl_dataset_rele(snap_ds, FTAG);
}
dsl_pool_config_exit(pool, FTAG);
}
/* May fail due to concurrent zfsctl_inode_alloc() */
ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops, creation);
}
return (ip);
}
/*
* Create the '.zfs' directory. This directory is cached as part of the VFS
* structure. This results in a hold on the zfsvfs_t. The code in zfs_umount()
* therefore checks against a vfs_count of 2 instead of 1. This reference
* is removed when the ctldir is destroyed in the unmount. All other entities
* under the '.zfs' directory are created dynamically as needed.
*
* Because the dynamically created '.zfs' directory entries assume the use
* of 64-bit inode numbers this support must be disabled on 32-bit systems.
*/
int
zfsctl_create(zfsvfs_t *zfsvfs)
{
ASSERT(zfsvfs->z_ctldir == NULL);
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root, 0);
if (zfsvfs->z_ctldir == NULL)
return (SET_ERROR(ENOENT));
return (0);
}
/*
* Destroy the '.zfs' directory or remove a snapshot from zfs_snapshots_by_name.
* Only called when the filesystem is unmounted.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
if (zfsvfs->z_issnap) {
zfs_snapentry_t *se;
spa_t *spa = zfsvfs->z_os->os_spa;
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_find_by_objsetid(spa, objsetid);
if (se != NULL)
zfsctl_snapshot_remove(se);
rw_exit(&zfs_snapshot_lock);
if (se != NULL) {
zfsctl_snapshot_unmount_cancel(se);
zfsctl_snapshot_rele(se);
}
} else if (zfsvfs->z_ctldir) {
iput(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
struct inode *
zfsctl_root(znode_t *zp)
{
ASSERT(zfs_has_ctldir(zp));
/* Must have an existing ref, so igrab() cannot return NULL */
VERIFY3P(igrab(ZTOZSB(zp)->z_ctldir), !=, NULL);
return (ZTOZSB(zp)->z_ctldir);
}
/*
 * Generate a long fid to indicate a snapdir. We encode whether the snapdir
 * is already mounted in the gen field. We do this because an nfsd lookup
 * will not trigger an automount. The next time nfsd does fh_to_dentry, we
 * will notice this, do the automount, and return ESTALE to force nfsd to
 * revalidate and follow the mount.
*/
static int
zfsctl_snapdir_fid(struct inode *ip, fid_t *fidp)
{
zfid_short_t *zfid = (zfid_short_t *)fidp;
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint32_t gen = 0;
uint64_t object;
uint64_t objsetid;
int i;
struct dentry *dentry;
if (fidp->fid_len < LONG_FID_LEN) {
fidp->fid_len = LONG_FID_LEN;
return (SET_ERROR(ENOSPC));
}
object = ip->i_ino;
objsetid = ZFSCTL_INO_SNAPDIRS - ip->i_ino;
zfid->zf_len = LONG_FID_LEN;
dentry = d_obtain_alias(igrab(ip));
if (!IS_ERR(dentry)) {
gen = !!d_mountpoint(dentry);
dput(dentry);
}
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
return (0);
}
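
The byte-at-a-time packing above keeps the fid layout independent of host
endianness. A standalone sketch of the same round trip, assuming a 6-byte
object field as in the short fid:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t object = 0x123456789abcdefULL;
	uint8_t zf_object[6];
	uint64_t decoded = 0;

	/* Pack low-order byte first, as in zfsctl_snapdir_fid() above. */
	for (int i = 0; i < 6; i++)
		zf_object[i] = (uint8_t)(object >> (8 * i));
	for (int i = 0; i < 6; i++)
		decoded |= (uint64_t)zf_object[i] << (8 * i);

	/* Only the low 48 bits survive the 6-byte field. */
	printf("decoded = 0x%llx\n", (unsigned long long)decoded);
	return (0);
}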
/*
* Generate an appropriate fid for an entry in the .zfs directory.
*/
int
zfsctl_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int i;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (zfsctl_is_snapdir(ip)) {
zfs_exit(zfsvfs, FTAG);
return (zfsctl_snapdir_fid(ip, fidp));
}
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOSPC));
}
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs znodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Construct a full dataset name in full_name: "pool/dataset@snap_name"
*/
static int
zfsctl_snapshot_name(zfsvfs_t *zfsvfs, const char *snap_name, int len,
char *full_name)
{
objset_t *os = zfsvfs->z_os;
if (zfs_component_namecheck(snap_name, NULL, NULL) != 0)
return (SET_ERROR(EILSEQ));
dmu_objset_name(os, full_name);
if ((strlen(full_name) + 1 + strlen(snap_name)) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(full_name, "@");
(void) strcat(full_name, snap_name);
return (0);
}
/*
* Returns full path in full_path: "/pool/dataset/.zfs/snapshot/snap_name/"
*/
static int
zfsctl_snapshot_path_objset(zfsvfs_t *zfsvfs, uint64_t objsetid,
int path_len, char *full_path)
{
objset_t *os = zfsvfs->z_os;
fstrans_cookie_t cookie;
char *snapname;
boolean_t case_conflict;
uint64_t id, pos = 0;
int error = 0;
if (zfsvfs->z_vfs->vfs_mntpoint == NULL)
return (SET_ERROR(ENOENT));
cookie = spl_fstrans_mark();
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os,
ZFS_MAX_DATASET_NAME_LEN, snapname, &id, &pos,
&case_conflict);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto out;
if (id == objsetid)
break;
}
snprintf(full_path, path_len, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint, snapname);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
spl_fstrans_unmark(cookie);
return (error);
}
/*
* Special case the handling of "..".
*/
int
zfsctl_root_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
int error = 0;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (strcmp(name, "..") == 0) {
*ipp = dip->i_sb->s_root->d_inode;
} else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIR,
&zpl_fops_snapdir, &zpl_ops_snapdir);
} else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SHARES,
&zpl_fops_shares, &zpl_ops_shares);
} else {
*ipp = NULL;
}
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
 * snapshot if it exists, creating the pseudo filesystem inode as necessary.
*/
int
zfsctl_snapdir_lookup(struct inode *dip, const char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
uint64_t id;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = dmu_snapshot_lookup(zfsvfs->z_os, name, &id);
if (error) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIRS - id,
&simple_dir_operations, &simple_dir_inode_operations);
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Renaming a directory under '.zfs/snapshot' will automatically trigger
* a rename of the snapshot to the new given name. The rename is confined
 * to the '.zfs/snapshot' directory; snapshots cannot be moved elsewhere.
*/
int
zfsctl_snapdir_rename(struct inode *sdip, const char *snm,
struct inode *tdip, const char *tnm, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(sdip);
char *to, *from, *real, *fsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
to = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
from = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
snm = real;
} else if (error != ENOTSUP) {
goto out;
}
}
dmu_objset_name(zfsvfs->z_os, fsname);
error = zfsctl_snapshot_name(ITOZSB(sdip), snm,
ZFS_MAX_DATASET_NAME_LEN, from);
if (error == 0)
error = zfsctl_snapshot_name(ITOZSB(tdip), tnm,
ZFS_MAX_DATASET_NAME_LEN, to);
if (error == 0)
error = zfs_secpolicy_rename_perms(from, to, cr);
if (error != 0)
goto out;
/*
* Cannot move snapshots out of the snapdir.
*/
if (sdip != tdip) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* No-op when names are identical.
*/
if (strcmp(snm, tnm) == 0) {
error = 0;
goto out;
}
rw_enter(&zfs_snapshot_lock, RW_WRITER);
error = dsl_dataset_rename_snapshot(fsname, snm, tnm, B_FALSE);
if (error == 0)
(void) zfsctl_snapshot_rename(snm, tnm);
rw_exit(&zfs_snapshot_lock);
out:
kmem_free(from, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(to, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Removing a directory under '.zfs/snapshot' will automatically trigger
* the removal of the snapshot with the given name.
*/
int
zfsctl_snapdir_remove(struct inode *dip, const char *name, cred_t *cr,
int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *snapname, *real;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, name, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
name = real;
} else if (error != ENOTSUP) {
goto out;
}
}
error = zfsctl_snapshot_name(ITOZSB(dip), name,
ZFS_MAX_DATASET_NAME_LEN, snapname);
if (error == 0)
error = zfs_secpolicy_destroy_perms(snapname, cr);
if (error != 0)
goto out;
error = zfsctl_snapshot_unmount(snapname, MNT_FORCE);
if ((error == 0) || (error == ENOENT))
error = dsl_destroy_snapshot(snapname, B_FALSE);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Creating a directory under '.zfs/snapshot' will automatically trigger
* the creation of a new snapshot with the given name.
*/
int
zfsctl_snapdir_mkdir(struct inode *dip, const char *dirname, vattr_t *vap,
struct inode **ipp, cred_t *cr, int flags)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *dsname;
int error;
if (!zfs_admin_snapshot)
return (SET_ERROR(EACCES));
dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zfs_component_namecheck(dirname, NULL, NULL) != 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
dmu_objset_name(zfsvfs->z_os, dsname);
error = zfs_secpolicy_snapshot_perms(dsname, cr);
if (error != 0)
goto out;
if (error == 0) {
error = dmu_objset_snapshot_one(dsname, dirname);
if (error != 0)
goto out;
error = zfsctl_snapdir_lookup(dip, dirname, ipp,
0, cr, NULL, NULL);
}
out:
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
return (error);
}
/*
* Flush everything out of the kernel's export table and such.
* This is needed as once the snapshot is used over NFS, its
 * entries in the svc_export and svc_expkey caches hold a reference
* to the snapshot mount point. There is no known way of flushing
* only the entries related to the snapshot.
*/
static void
exportfs_flush(void)
{
char *argv[] = { "/usr/sbin/exportfs", "-f", NULL };
char *envp[] = { NULL };
(void) call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/*
* Attempt to unmount a snapshot by making a call to user space.
* There is no assurance that this can or will succeed, is just a
* best effort. In the case where it does fail, perhaps because
* it's in use, the unmount will fail harmlessly.
*/
int
zfsctl_snapshot_unmount(const char *snapname, int flags)
{
char *argv[] = { "/usr/bin/env", "umount", "-t", "zfs", "-n", NULL,
NULL };
char *envp[] = { NULL };
zfs_snapentry_t *se;
int error;
rw_enter(&zfs_snapshot_lock, RW_READER);
if ((se = zfsctl_snapshot_find_by_name(snapname)) == NULL) {
rw_exit(&zfs_snapshot_lock);
return (SET_ERROR(ENOENT));
}
rw_exit(&zfs_snapshot_lock);
exportfs_flush();
if (flags & MNT_FORCE)
argv[4] = "-fn";
argv[5] = se->se_path;
dprintf("unmount; path=%s\n", se->se_path);
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
zfsctl_snapshot_rele(se);
/*
* The umount system utility will return 256 on error. We must
 * assume this error is because the file system is busy, so it is
* converted to the more sensible EBUSY.
*/
if (error)
error = SET_ERROR(EBUSY);
return (error);
}
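
The 256 mentioned in the comment falls out of the classic wait(2) encoding
which call_usermodehelper() is assumed to report, with the helper's exit
status in the high byte. A minimal sketch:

#include <stdio.h>

int
main(void)
{
	int ret = 256;			/* what the helper returned */
	int exit_status = ret >> 8;	/* 1: umount(8) failed */
	int signum = ret & 0xff;	/* 0: not killed by a signal */

	printf("exit=%d signal=%d\n", exit_status, signum);
	return (0);
}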
int
zfsctl_snapshot_mount(struct path *path, int flags)
{
struct dentry *dentry = path->dentry;
struct inode *ip = dentry->d_inode;
zfsvfs_t *zfsvfs;
zfsvfs_t *snap_zfsvfs;
zfs_snapentry_t *se;
char *full_name, *full_path;
char *argv[] = { "/usr/bin/env", "mount", "-t", "zfs", "-n", NULL, NULL,
NULL };
char *envp[] = { NULL };
int error;
struct path spath;
if (ip == NULL)
return (SET_ERROR(EISDIR));
zfsvfs = ITOZSB(ip);
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
full_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
full_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_name(zfsvfs, dname(dentry),
ZFS_MAX_DATASET_NAME_LEN, full_name);
if (error)
goto error;
/*
* Construct a mount point path from sb of the ctldir inode and dirent
 * name, instead of from d_path(), so that a chroot'd process doesn't fail
* on mount.zfs(8).
*/
snprintf(full_path, MAXPATHLEN, "%s/.zfs/snapshot/%s",
zfsvfs->z_vfs->vfs_mntpoint ? zfsvfs->z_vfs->vfs_mntpoint : "",
dname(dentry));
/*
* Multiple concurrent automounts of a snapshot are never allowed.
* The snapshot may be manually mounted as many times as desired.
*/
if (zfsctl_snapshot_ismounted(full_name)) {
error = 0;
goto error;
}
/*
* Attempt to mount the snapshot from user space. Normally this
 * would be done using the vfs_kern_mount() function; however, that
 * function is marked GPL-only and cannot be used. On error we are
 * careful to log the real error to the console and return EISDIR
* to safely abort the automount. This should be very rare.
*
* If the user mode helper happens to return EBUSY, a concurrent
* mount is already in progress in which case the error is ignored.
* Take note that if the program was executed successfully the return
 * value from call_usermodehelper() will be ((exitcode << 8) + signal).
*/
dprintf("mount; name=%s path=%s\n", full_name, full_path);
argv[5] = full_name;
argv[6] = full_path;
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (error) {
if (!(error & MOUNT_BUSY << 8)) {
zfs_dbgmsg("Unable to automount %s error=%d",
full_path, error);
error = SET_ERROR(EISDIR);
} else {
/*
* EBUSY, this could mean a concurrent mount, or the
 * snapshot has already been mounted at a completely
* different place. We return 0 so VFS will retry. For
* the latter case the VFS will retry several times
* and return ELOOP, which is probably not a very good
* behavior.
*/
error = 0;
}
goto error;
}
/*
* Follow down in to the mounted snapshot and set MNT_SHRINKABLE
* to identify this as an automounted filesystem.
*/
spath = *path;
path_get(&spath);
if (follow_down_one(&spath)) {
snap_zfsvfs = ITOZSB(spath.dentry->d_inode);
snap_zfsvfs->z_parent = zfsvfs;
dentry = spath.dentry;
spath.mnt->mnt_flags |= MNT_SHRINKABLE;
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_alloc(full_name, full_path,
snap_zfsvfs->z_os->os_spa, dmu_objset_id(snap_zfsvfs->z_os),
dentry);
zfsctl_snapshot_add(se);
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
rw_exit(&zfs_snapshot_lock);
}
path_put(&spath);
error:
kmem_free(full_name, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(full_path, MAXPATHLEN);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Get the snapdir inode from fid
*/
int
zfsctl_snapdir_vget(struct super_block *sb, uint64_t objsetid, int gen,
struct inode **ipp)
{
int error;
struct path path;
char *mnt;
struct dentry *dentry;
mnt = kmem_alloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_path_objset(sb->s_fs_info, objsetid,
MAXPATHLEN, mnt);
if (error)
goto out;
/* Trigger automount */
error = -kern_path(mnt, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &path);
if (error)
goto out;
path_put(&path);
/*
* Get the snapdir inode. Note, we don't want to use the above
* path because it contains the root of the snapshot rather
* than the snapdir.
*/
*ipp = ilookup(sb, ZFSCTL_INO_SNAPDIRS - objsetid);
if (*ipp == NULL) {
error = SET_ERROR(ENOENT);
goto out;
}
/* check gen, see zfsctl_snapdir_fid */
dentry = d_obtain_alias(igrab(*ipp));
if (gen != (!IS_ERR(dentry) && d_mountpoint(dentry))) {
iput(*ipp);
*ipp = NULL;
error = SET_ERROR(ENOENT);
}
if (!IS_ERR(dentry))
dput(dentry);
out:
kmem_free(mnt, MAXPATHLEN);
return (error);
}
int
zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ITOZSB(dip);
znode_t *zp;
znode_t *dzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (zfsvfs->z_shares_dir == 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
error = zfs_lookup(dzp, name, &zp, 0, cr, NULL, NULL);
zrele(dzp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Initialize the various pieces we'll need to create and manipulate .zfs
* directories. Currently this is unused but available.
*/
void
zfsctl_init(void)
{
avl_create(&zfs_snapshots_by_name, snapentry_compare_by_name,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_name));
avl_create(&zfs_snapshots_by_objsetid, snapentry_compare_by_objsetid,
sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t,
se_node_objsetid));
rw_init(&zfs_snapshot_lock, NULL, RW_DEFAULT, NULL);
}
/*
* Clean up the various pieces we needed for .zfs directories. In particular
* ensure the expiry timer is canceled safely.
*/
void
zfsctl_fini(void)
{
avl_destroy(&zfs_snapshots_by_name);
avl_destroy(&zfs_snapshots_by_objsetid);
rw_destroy(&zfs_snapshot_lock);
}
module_param(zfs_admin_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_admin_snapshot, "Enable mkdir/rmdir/mv in .zfs/snapshot");
module_param(zfs_expire_snapshot, int, 0644);
MODULE_PARM_DESC(zfs_expire_snapshot, "Seconds to expire .zfs/snapshot");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 2792bc027213..2015c20d7340 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -1,2129 +1,2135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/objlist.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
if (vfsp != NULL) {
if (vfsp->vfs_mntpoint != NULL)
kmem_strfree(vfsp->vfs_mntpoint);
kmem_free(vfsp, sizeof (vfs_t));
}
}
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
switch (token) {
case TOKEN_RO:
vfsp->vfs_readonly = B_TRUE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_RW:
vfsp->vfs_readonly = B_FALSE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
vfsp->vfs_setuid = B_TRUE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
vfsp->vfs_setuid = B_FALSE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
vfsp->vfs_exec = B_TRUE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
vfsp->vfs_exec = B_FALSE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
vfsp->vfs_devices = B_TRUE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
vfsp->vfs_devices = B_FALSE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
vfsp->vfs_xattr = ZFS_XATTR_SA;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
vfsp->vfs_xattr = ZFS_XATTR_OFF;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
vfsp->vfs_atime = B_TRUE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
vfsp->vfs_atime = B_FALSE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
vfsp->vfs_relatime = B_TRUE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
vfsp->vfs_relatime = B_FALSE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
vfsp->vfs_nbmand = B_TRUE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
vfsp->vfs_nbmand = B_FALSE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
vfsp->vfs_mntpoint = match_strdup(&args[0]);
if (vfsp->vfs_mntpoint == NULL)
return (SET_ERROR(ENOMEM));
break;
default:
break;
}
return (0);
}
/*
* Parse the raw mntopts and return a vfs_t describing the options.
*/
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
vfs_t *tmp_vfsp;
int error;
tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
if (mntopts != NULL) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
tmp_mntopts = t = kmem_strdup(mntopts);
if (tmp_mntopts == NULL) {
zfsvfs_vfs_free(tmp_vfsp);
return (SET_ERROR(ENOMEM));
}
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
if (error) {
kmem_strfree(tmp_mntopts);
zfsvfs_vfs_free(tmp_vfsp);
return (error);
}
}
kmem_strfree(tmp_mntopts);
}
*vfsp = tmp_vfsp;
return (0);
}
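/*
 * Illustrative sketch (userspace, with a hypothetical handle_option()):
 * the loop above is the standard strsep(3) idiom for comma-separated
 * option strings:
 *
 * char *dup = strdup("ro,noatime,,xattr"), *t = dup, *p;
 * while ((p = strsep(&t, ",")) != NULL) {
 *         if (!*p)
 *                 continue;       // skip empty fields like ",,"
 *         handle_option(p);
 * }
 * free(dup);
 *
 * strsep() mutates its input, which is why a duplicate of mntopts is
 * parsed and freed above rather than the caller's string.
 */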
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
}
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
(void) cr;
zfsvfs_t *zfsvfs = sb->s_fs_info;
/*
* Semantically, the only requirement is that the sync be initiated.
* The DMU syncs out txgs frequently, so there's nothing to do.
*/
if (!wait)
return (0);
if (zfsvfs != NULL) {
/*
* Sync a specific filesystem.
*/
dsl_pool_t *dp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (spa_suspended(dp->dp_spa)) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
/*
* Update SB_NOATIME bit in VFS super block. Since atime update is
* determined by atime_needs_update(), atime_needs_update() needs to
* return false if atime is turned off, and not unconditionally return
* false if atime is turned on.
*/
if (newval)
sb->s_flags &= ~SB_NOATIME;
else
sb->s_flags |= SB_NOATIME;
}
static void
relatime_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_relatime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
acltype_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
break;
case ZFS_ACLTYPE_POSIX:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
break;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
int error = 0;
ASSERT(vfsp);
zfsvfs = vfsp->vfs_data;
ASSERT(zfsvfs);
os = zfsvfs->z_os;
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
vfsp->vfs_do_readonly = B_TRUE;
vfsp->vfs_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (vfsp->vfs_do_readonly)
readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
if (vfsp->vfs_do_setuid)
setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
if (vfsp->vfs_do_exec)
exec_changed_cb(zfsvfs, vfsp->vfs_exec);
if (vfsp->vfs_do_devices)
devices_changed_cb(zfsvfs, vfsp->vfs_devices);
if (vfsp->vfs_do_xattr)
xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
if (vfsp->vfs_do_atime)
atime_changed_cb(zfsvfs, vfsp->vfs_atime);
if (vfsp->vfs_do_relatime)
relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
if (vfsp->vfs_do_nbmand)
nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Takes a dataset, a property, a value and that value's setpoint as
* found in the ZAP. Checks if the property has been changed in the vfs.
* If so, val and setpoint will be overwritten with updated content.
* Otherwise, they are left unchanged.
*/
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS)
return (EINVAL);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (zfvp == NULL)
return (ESRCH);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfsp->vfs_do_atime)
tmp = vfsp->vfs_atime;
break;
case ZFS_PROP_RELATIME:
if (vfsp->vfs_do_relatime)
tmp = vfsp->vfs_relatime;
break;
case ZFS_PROP_DEVICES:
if (vfsp->vfs_do_devices)
tmp = vfsp->vfs_devices;
break;
case ZFS_PROP_EXEC:
if (vfsp->vfs_do_exec)
tmp = vfsp->vfs_exec;
break;
case ZFS_PROP_SETUID:
if (vfsp->vfs_do_setuid)
tmp = vfsp->vfs_setuid;
break;
case ZFS_PROP_READONLY:
if (vfsp->vfs_do_readonly)
tmp = vfsp->vfs_readonly;
break;
case ZFS_PROP_XATTR:
if (vfsp->vfs_do_xattr)
tmp = vfsp->vfs_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfsp->vfs_do_nbmand)
tmp = vfsp->vfs_nbmand;
break;
default:
return (ENOENT);
}
if (tmp != *val) {
if (setpoint)
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
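/*
 * Usage sketch (illustrative): after a temporary override such as
 * "mount -o remount,noatime <fs>", vfs_do_atime is set, so a lookup of
 * ZFS_PROP_ATIME through this function returns the overridden value
 * and reports its setpoint as "temporary" rather than the dataset the
 * property would normally inherit from.
 */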
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.\n", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if ((error == 0) && (val == ZFS_XATTR_SA))
zfsvfs->z_xattr_sa = B_TRUE;
}
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT(zfsvfs->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
return (0);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
/*
* Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
* on a failure. Do not pass in a statically allocated zfsvfs.
*/
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_sb = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (int i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
zfsvfs_free(zfsvfs);
return (error);
}
zfsvfs->z_drain_task = TASKQID_INVALID;
zfsvfs->z_draining = B_FALSE;
zfsvfs->z_drain_cancel = B_TRUE;
*zfvp = zfsvfs;
return (0);
}
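/*
 * Sizing note (illustrative): the hold arrays above use
 * zfs_object_mutex_size rounded down to a power of two and capped at
 * ZFS_OBJ_MTX_MAX. For example, a hypothetical tunable value of 100
 * gives highbit64(100) = 7, so size = 1 << 6 = 64 hold trees and locks.
 */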
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
boolean_t readonly = zfs_is_readonly(zfsvfs);
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
if (readonly != 0) {
readonly_changed_cb(zfsvfs, B_FALSE);
} else {
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zfsvfs, B_TRUE);
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i, size = zfsvfs->z_hold_size;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
zfsvfs_vfs_free(zfsvfs->z_vfs);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
* Check that the hex label string is appropriate for the dataset being
* mounted into the global_zone proper.
*
* Return an error if the hex label string is not default or
* admin_low/admin_high. For admin_low labels, the corresponding
* dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
/* must be readonly */
uint64_t rdonly;
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (SET_ERROR(EACCES));
return (rdonly ? 0 : SET_ERROR(EACCES));
}
return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
uint32_t bshift)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
uint64_t quota;
uint64_t used;
int err;
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
sizeof (buf) - offset, B_FALSE);
if (err)
return (err);
if (zfsvfs->z_projectquota_obj == 0)
goto objs;
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
goto objs;
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf + offset, 8, 1, &used);
if (unlikely(err == ENOENT)) {
uint32_t blksize;
u_longlong_t nblocks;
/*
* Quota accounting is async, so a race is possible: at least
* one object with the given project ID exists even though its
* usage entry has not been recorded yet.
*/
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
if (unlikely(zp->z_blksz == 0))
blksize = zfsvfs->z_max_blksz;
used = blksize * nblocks;
} else if (err) {
return (err);
}
statp->f_blocks = quota >> bshift;
statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
statp->f_bavail = statp->f_bfree;
objs:
if (zfsvfs->z_projectobjquota_obj == 0)
return (0);
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
return (0);
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf, 8, 1, &used);
if (unlikely(err == ENOENT)) {
/*
* Quota accounting is async, so a race is possible: at least
* one object with the given project ID exists even though its
* usage entry has not been recorded yet.
*/
used = 1;
} else if (err) {
return (err);
}
statp->f_files = quota;
statp->f_ffree = (quota > used) ? (quota - used) : 0;
return (0);
}
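/*
 * Worked example (illustrative): with a 10 GiB project block quota,
 * 4 GiB used, and bshift = 17 (128 KiB blocks), the code above yields
 * f_blocks = 81920 and f_bfree = f_bavail = 49152, i.e. statfs(2)
 * presents the project quota as if it were the size of the filesystem.
 */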
int
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int err = 0;
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
/*
* The underlying storage pool actually uses multiple block
* sizes. Under Solaris frsize (fragment size) is reported as
* the smallest block size we support, and bsize (block size)
* as the filesystem's maximum block size. Unfortunately,
* under Linux the fragment size and block size are often used
* interchangeably. Thus we are forced to report both of them
* as the filesystem's maximum block size.
*/
statp->f_frsize = zfsvfs->z_max_blksz;
statp->f_bsize = zfsvfs->z_max_blksz;
uint32_t bshift = fls(statp->f_bsize) - 1;
/*
* The following report "total" blocks of various kinds in
* the file system, but reported in terms of f_bsize - the
* "preferred" size.
*/
/* Round up so we never have a filesystem using 0 blocks. */
refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
statp->f_blocks = (refdbytes + availbytes) >> bshift;
statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
statp->f_files = statp->f_ffree + usedobjs;
statp->f_fsid.val[0] = (uint32_t)fsid;
statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
statp->f_type = ZFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
/*
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
znode_t *zp = ITOZ(ip);
if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
zpl_is_valid_projid(zp->z_projid))
err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
}
zfs_exit(zfsvfs, FTAG);
return (err);
}
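/*
 * Worked example (illustrative): with the 128 KiB SPA_OLD_MAXBLOCKSIZE
 * default for z_max_blksz, f_bsize = 131072 and
 * bshift = fls(131072) - 1 = 17. A dataset with 1 GiB referenced and
 * 3 GiB available then reports f_blocks = (4 GiB) >> 17 = 32768 and
 * f_bfree = f_bavail = (3 GiB) >> 17 = 24576.
 */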
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*ipp = ZTOI(rootzp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Linux kernels older than 3.1 do not support a per-filesystem shrinker.
* To accommodate this we must improvise and manually walk the list of znodes
* attempting to prune dentries in order to be able to drop the inodes.
*
* To avoid scanning the same znodes multiple times they are always rotated
* to the end of the z_all_znodes list. New znodes are inserted at the
* end of the list so we're always scanning the oldest znodes first.
*/
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
znode_t **zp_array, *zp;
int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
int objects = 0;
int i = 0, j = 0;
zp_array = vmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
if ((i++ > nr_to_scan) || (j >= max_array))
break;
ASSERT(list_link_active(&zp->z_link_node));
list_remove(&zfsvfs->z_all_znodes, zp);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
/* Skip active znodes and .zfs entries */
if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
continue;
if (igrab(ZTOI(zp)) == NULL)
continue;
zp_array[j] = zp;
j++;
}
mutex_exit(&zfsvfs->z_znodes_lock);
for (i = 0; i < j; i++) {
zp = zp_array[i];
ASSERT3P(zp, !=, NULL);
d_prune_aliases(ZTOI(zp));
if (atomic_read(&ZTOI(zp)->i_count) == 1)
objects++;
zrele(zp);
}
vmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects);
}
/*
* The ARC has requested that the filesystem drop entries from the dentry
* and inode caches. This can occur when the ARC needs to free meta data
* blocks but can't because they are all pinned by entries in these caches.
*/
+#if defined(HAVE_SUPER_BLOCK_S_SHRINK)
+#define S_SHRINK(sb) (&(sb)->s_shrink)
+#elif defined(HAVE_SUPER_BLOCK_S_SHRINK_PTR)
+#define S_SHRINK(sb) ((sb)->s_shrink)
+#endif
+
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
int error = 0;
- struct shrinker *shrinker = &sb->s_shrink;
+ struct shrinker *shrinker = S_SHRINK(sb);
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
.gfp_mask = GFP_KERNEL,
};
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
defined(SHRINK_CONTROL_HAS_NID) && \
defined(SHRINKER_NUMA_AWARE)
- if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
+ if (shrinker->flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
/*
* reset sc.nr_to_scan, modified by
* scan_objects == super_cache_scan
*/
sc.nr_to_scan = nr_to_scan;
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif
#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
/*
* Fall back to zfs_prune_aliases if the kernel's per-superblock
* shrinker couldn't free anything, possibly due to the inodes being
* allocated in a different memcg.
*/
if (*objects == 0)
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif
zfs_exit(zfsvfs, FTAG);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"pruning, nr_to_scan=%lu objects=%d error=%d\n",
nr_to_scan, *objects, error);
return (error);
}
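/*
 * Compatibility note: the S_SHRINK() macro above exists because some
 * kernels embed the per-superblock shrinker in the super block
 * (struct shrinker s_shrink, HAVE_SUPER_BLOCK_S_SHRINK) while newer
 * ones hold it by pointer (struct shrinker *s_shrink,
 * HAVE_SUPER_BLOCK_S_SHRINK_PTR). Both spellings collapse to a
 * struct shrinker *, so the pruning logic itself is unchanged:
 *
 * struct shrinker *shrinker = S_SHRINK(sb);
 * // embedded field: &sb->s_shrink
 * // pointer field:  sb->s_shrink
 */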
/*
* Teardown the zfsvfs_t.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
zfs_unlinked_drain_stop_wait(zfsvfs);
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but iputs run from the taskq
* may add the parents of dir-based xattrs to the taskq,
* so we want to wait for these.
*
* We can safely check z_all_znodes for being empty because the
* VFS has already blocked operations which add to it.
*/
int round = 0;
while (!list_is_empty(&zfsvfs->z_all_znodes)) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's super block as the
* parent filesystem and all of its snapshots have their
* inode's super block set to the parent's filesystem's
* super block. Note, 'z_parent' is self referential
* for non-snapshots.
*/
shrink_dcache_sb(zfsvfs->z_parent->z_sb);
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed, then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no VFS ops active, and any new VFS ops
* will fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs. We also grab an extra reference to all
* the remaining inodes so that the kernel does not attempt to free
* any inodes of a suspended fs. This can cause deadlocks since the
* zfs_resume_fs() process may involve starting threads, which might
* attempt to free unreferenced inodes to free up memory for the new
* thread.
*/
if (!unmounting) {
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
if (igrab(ZTOI(zp)) != NULL)
zp->z_suspended = B_TRUE;
}
mutex_exit(&zfsvfs->z_znodes_lock);
}
/*
* If we are unmounting, set the unmounted flag and let new VFS ops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other VFS ops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty) {
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
}
dmu_objset_evict_dbufs(zfsvfs->z_os);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
vfs_t *vfs = NULL;
int canwrite;
int dataset_visible_zone;
ASSERT(zm);
ASSERT(osname);
dataset_visible_zone = zone_dataset_visible(osname, &canwrite);
/*
* Refuse to mount a filesystem if we are in a namespace and the
* dataset is not visible or writable in that namespace.
*/
if (!INGLOBALZONE(curproc) &&
(!dataset_visible_zone || !canwrite)) {
return (SET_ERROR(EPERM));
}
error = zfsvfs_parse_options(zm->mnt_data, &vfs);
if (error)
return (error);
/*
* If a non-writable filesystem is being mounted without the
* read-only flag, pretend it was set, as done for snapshots.
*/
if (!canwrite)
vfs->vfs_readonly = B_TRUE;
error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
if (error) {
zfsvfs_vfs_free(vfs);
goto out;
}
if ((error = dsl_prop_get_integer(osname, "recordsize",
&recordsize, NULL))) {
zfsvfs_vfs_free(vfs);
goto out;
}
vfs->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfs;
zfsvfs->z_sb = sb;
sb->s_fs_info = zfsvfs;
sb->s_magic = ZFS_SUPER_MAGIC;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_blocksize = recordsize;
sb->s_blocksize_bits = ilog2(recordsize);
error = -zpl_bdi_setup(sb, "zfs");
if (error)
goto out;
sb->s_bdi->ra_pages = 0;
/* Set callback operations for the file system. */
sb->s_op = &zpl_super_operations;
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
zfsvfs->z_snap_defer_time = jiffies;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
/* Allocate a root inode for the filesystem. */
error = zfs_root(zfsvfs, &root_inode);
if (error) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
goto out;
}
/* Allocate a root dentry for the filesystem */
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
error = SET_ERROR(ENOMEM);
goto out;
}
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
if (error) {
if (zfsvfs != NULL) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
/*
* make sure we don't have dangling sb->s_fs_info which
* zfs_preumount will use.
*/
sb->s_fs_info = NULL;
}
return (error);
}
/*
* Called when an unmount is requested and certain sanity checks have
* already passed. At this point no dentries or inodes have been reclaimed
* from their respective caches. We drop the extra reference on the .zfs
* control directory to allow everything to be reclaimed. All snapshots
* must already have been unmounted to reach this point.
*/
void
zfs_preumount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/* zfsvfs is NULL when zfs_domount fails during mount */
if (zfsvfs) {
zfs_unlinked_drain_stop_wait(zfsvfs);
zfsctl_destroy(sb->s_fs_info);
/*
* Wait for zrele_async before entering evict_inodes in
* generic_shutdown_super. The reason we must finish before
* evict_inodes is when lazytime is on, or when zfs_purgedir
* calls zfs_zget, zrele would bump i_count from 0 to 1. This
* would race with the i_count check in evict_inodes. This means
* it could destroy the inode while we are still using it.
*
* We wait for two passes. xattr directories in the first pass
* may add xattr entries in zfs_purgedir, so in the second pass
* we wait for them. We don't use taskq_wait here because it is
* a pool wide taskq. Other mounted filesystems can constantly
* do zrele_async and there's no guarantee when taskq will be
* empty.
*/
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
}
}
/*
* Called once all other unmount-related teardown has occurred.
* It is our responsibility to release any remaining infrastructure.
*/
int
zfs_umount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
objset_t *os;
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
os = zfsvfs->z_os;
zpl_bdi_destroy(sb);
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
zfsvfs_free(zfsvfs);
sb->s_fs_info = NULL;
return (0);
}
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
vfs_t *vfsp;
boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
int error;
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
return (EROFS);
}
error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
if (error)
return (error);
if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
zfs_unregister_callbacks(zfsvfs);
zfsvfs_vfs_free(zfsvfs->z_vfs);
vfsp->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfsp;
if (!issnap)
(void) zfs_register_callbacks(vfsp);
return (error);
}
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*ipp = NULL;
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
return (SET_ERROR(EINVAL));
}
/* LONG_FID_LEN means snapdirs */
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
dprintf("snapdir fid: objsetid (%llu) != "
"ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
objsetid, ZFSCTL_INO_SNAPDIRS, object);
return (SET_ERROR(EINVAL));
}
if (fid_gen > 1 || setgen != 0) {
dprintf("snapdir fid: fid_gen (%llu) and setgen "
"(%llu)\n", fid_gen, setgen);
return (SET_ERROR(EINVAL));
}
return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
}
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
*ipp = zfsvfs->z_ctldir;
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
0, kcred, NULL, NULL) == 0);
} else {
/*
* Must have an existing ref, so igrab()
* cannot return NULL
*/
VERIFY3P(igrab(*ipp), !=, NULL);
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
/* Don't export xattr stuff */
if (zp->z_pflags & ZFS_XATTR) {
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if ((fid_gen == 0) && (zfsvfs->z_root == object))
fid_gen = zp_gen;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
fid_gen);
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
*ipp = ZTOI(zp);
if (*ipp)
zfs_znode_update_vfs(ITOZ(*ipp));
zfs_exit(zfsvfs, FTAG);
return (0);
}
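/*
 * Illustrative note: the fid fields are byte arrays, so the loops
 * above reassemble integers least-significant byte first; e.g.
 * zf_object bytes {0x34, 0x12, 0, 0, 0, 0} decode to object 0x1234 on
 * any host, keeping exported file handles endian-independent.
 */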
/*
* Block out VFS ops and close zfsvfs_t
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err, err2;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
/*
* Attempt to re-establish all the active inodes with their
* dbufs. If a zfs_rezget() fails, then we unhash the inode
* and mark it stale. This prevents a collision if a new
* inode/object is created which must use the same inode
* number. The stale inode will be released when the
* VFS prunes the dentry holding the remaining references
* on the stale inode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
err2 = zfs_rezget(zp);
if (err2) {
zpl_d_drop_aliases(ZTOI(zp));
remove_inode_hash(ZTOI(zp));
}
/* see comment in zfs_suspend_fs() */
if (zp->z_suspended) {
zfs_zrele_async(zp);
zp->z_suspended = B_FALSE;
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
/*
* zfs_suspend_fs() could have interrupted freeing
* of dnodes. We need to restart this freeing so
* that we don't "leak" the space.
*/
zfs_unlinked_drain(zfsvfs);
}
/*
* Most of the time zfs_suspend_fs is used for changing the contents
* of the underlying dataset. ZFS rollback and receive operations
* might create files for which negative dentries are present in
* the cache. Since walking the dcache would require a lot of GPL-only
* code duplication, it's much easier on these rather rare occasions
* just to flush the whole dcache for the given dataset/filesystem.
*/
shrink_dcache_sb(zfsvfs->z_sb);
bail:
if (err != 0)
zfsvfs->z_unmounted = B_TRUE;
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (zfsvfs->z_os)
(void) zfs_umount(zfsvfs->z_sb);
}
return (err);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_sb);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
/*
* Automounted snapshots rely on periodic revalidation
* to keep them from being automatically unmounted.
*/
inline void
zfs_exit_fs(zfsvfs_t *zfsvfs)
{
if (!zfsvfs->z_issnap)
return;
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os),
zfs_expire_snapshot);
}
}
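/*
 * Timing sketch (illustrative): with the default zfs_expire_snapshot
 * of 300 seconds, the guard above re-arms the delayed unmount at most
 * once every MAX(300 * HZ / 2, HZ) jiffies, i.e. every 150 seconds,
 * so heavy traversal of a snapshot does not reschedule the expiry
 * timer on every zfs_exit().
 */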
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %llu to %llu", zfsvfs->z_version, newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_unmounted)
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
/*
* We don't need to do anything here, the devname is always current by
* virtue of zfsvfs->z_sb->s_op->show_devname.
*/
(void) oldname, (void) newname;
}
void
zfs_init(void)
{
zfsctl_init();
zfs_znode_init();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
register_filesystem(&zpl_fs_type);
#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
register_fo_extend(&zpl_file_operations);
#endif
}
void
zfs_fini(void)
{
/*
* We don't use taskq_wait_outstanding() because zpl_posix_acl_free()
* might queue additional work.
*/
taskq_wait(system_delay_taskq);
taskq_wait(system_taskq);
#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
unregister_fo_extend(&zpl_file_operations);
#endif
unregister_filesystem(&zpl_fs_type);
zfs_znode_fini();
zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
index b464f615cdd3..c06a75662bf7 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
@@ -1,4249 +1,4251 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
 * This is done, while avoiding races, using zfs_enter(zfsvfs).
* A zfs_exit(zfsvfs) is needed before all returns. Any znodes
* must be checked with zfs_verify_zp(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) zrele() should always be the last thing except for zil_commit() (if
* necessary) and zfs_exit(). This is for 3 reasons: First, if it's the
* last reference, the vnode/znode can be freed, so the zp may point to
* freed memory. Second, the last reference will call zfs_zinactive(),
* which may induce a lot of work -- pushing cached pages (which acquires
* range locks) and syncing out cached atime changes. Third,
* zfs_zinactive() may require a new tx, which could deadlock the system
* if you were already holding one. This deadlock occurs because the tx
* currently being operated on prevents a txg from syncing, which
* prevents the new tx from progressing, resulting in a deadlock. If you
* must call zrele() within a tx, use zfs_zrele_async(). Note that iput()
* is a synonym for zrele().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* zfs_enter(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* zfs_exit(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* zil_commit(zilog, foid); // synchronous when necessary
* zfs_exit(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/* Honor ZFS_APPENDONLY file attribute */
if (blk_mode_is_open_write(mode) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & O_APPEND) == 0)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
zfs_exit(zfsvfs, FTAG);
return (0);
}
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
/* Decrement the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_dec_32(&zp->z_sync_cnt);
zfs_exit(zfsvfs, FTAG);
return (0);
}
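/*
 * Illustrative note (sketch only, not part of the interface contract):
 * zfs_open() and zfs_close() are symmetric, so for any O_SYNC open/close
 * pair the counter round-trips:
 *
 *	zfs_open(ip, mode, flag | O_SYNC, cr);	// z_sync_cnt: n -> n + 1
 *	...
 *	zfs_close(ip, flag | O_SYNC, cr);	// z_sync_cnt: n + 1 -> n
 *
 * A non-zero z_sync_cnt lets later code treat the file as synchronously
 * opened with a single cheap atomic read.
 */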
#if defined(_KERNEL)
static int zfs_fillpage(struct inode *ip, struct page *pp);
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. Update all mapped
 * pages with the contents of the corresponding dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
int64_t off = start & (PAGE_SIZE - 1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
uint64_t nbytes = MIN(PAGE_SIZE - off, len);
struct page *pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
void *pb = kmap(pp);
int error = dmu_read(os, zp->z_id, start + off,
nbytes, pb + off, DMU_READ_PREFETCH);
kunmap(pp);
if (error) {
SetPageError(pp);
ClearPageUptodate(pp);
} else {
ClearPageError(pp);
SetPageUptodate(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
}
unlock_page(pp);
put_page(pp);
}
len -= nbytes;
off = 0;
}
}
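/*
 * Illustrative example for update_pages() above: after a write of 'len'
 * bytes at offset 'start', every resident page-cache page overlapping
 * [start, start + len) is re-read from the DMU so that mmap() readers
 * observe the new data; pages not currently in the cache are skipped and
 * will be filled on their next fault.
 */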
/*
* When a file is memory mapped, we must keep the I/O data synchronized
* between the DMU cache and the memory mapped pages. Preferentially read
 * from memory mapped pages; otherwise fall back to reading through the dmu.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
int64_t start = uio->uio_loffset;
int64_t off = start & (PAGE_SIZE - 1);
int len = nbytes;
int error = 0;
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
uint64_t bytes = MIN(PAGE_SIZE - off, len);
struct page *pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
/*
* If filemap_fault() retries there exists a window
* where the page will be unlocked and not up to date.
 * In this case we must try to fill the page.
*/
if (unlikely(!PageUptodate(pp))) {
error = zfs_fillpage(ip, pp);
if (error) {
unlock_page(pp);
put_page(pp);
return (error);
}
}
ASSERT(PageUptodate(pp) || PageDirty(pp));
unlock_page(pp);
void *pb = kmap(pp);
error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
put_page(pp);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
}
len -= bytes;
off = 0;
if (error)
break;
}
return (error);
}
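/*
 * Worked example for mappedread() (illustrative): with 4 KiB pages, a
 * 6 KiB read at offset 2 KiB touches pages 0 and 1. If only page 0 is
 * resident, the first 2 KiB are copied out of the page cache and the
 * remaining 4 KiB are read through dmu_read_uio_dbuf(); note that 'off'
 * is non-zero only on the first loop iteration.
 */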
#endif /* _KERNEL */
static unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to
* data - bytes to write
* len - number of bytes to write
* pos - offset to start writing at
*
* OUT: resid - remaining bytes to write
*
* RETURN: 0 if success
* positive error code if failure. EIO is returned
* for a short write when residp isn't provided.
*
* Timestamps:
* zp - ctime|mtime updated if byte count > 0
*/
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *residp)
{
fstrans_cookie_t cookie;
int error;
struct iovec iov;
iov.iov_base = (void *)data;
iov.iov_len = len;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
cookie = spl_fstrans_mark();
error = zfs_write(zp, &uio, 0, kcred);
spl_fstrans_unmark(cookie);
if (error == 0) {
if (residp != NULL)
*residp = zfs_uio_resid(&uio);
else if (zfs_uio_resid(&uio) != 0)
error = SET_ERROR(EIO);
}
return (error);
}
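/*
 * Minimal caller sketch for zfs_write_simple() (hypothetical, for
 * illustration only):
 *
 *	size_t resid;
 *	int err = zfs_write_simple(zp, buf, buflen, 0, &resid);
 *	if (err == 0 && resid != 0)
 *		...;	// short write: only buflen - resid bytes landed
 *
 * Passing residp == NULL instead converts any short write into EIO, per
 * the contract documented above.
 */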
static void
zfs_rele_async_task(void *arg)
{
iput(arg);
}
void
zfs_zrele_async(znode_t *zp)
{
struct inode *ip = ZTOI(zp);
objset_t *os = ITOZSB(ip)->z_os;
ASSERT(atomic_read(&ip->i_count) > 0);
ASSERT(os != NULL);
/*
* If decrementing the count would put us at 0, we can't do it inline
* here, because that would be synchronous. Instead, dispatch an iput
* to run later.
*
* For more information on the dangers of a synchronous iput, see the
* header comment of this file.
*/
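/*
 * atomic_add_unless() refuses the decrement (and returns 0) when
 * i_count is exactly 1, i.e. when we hold the last reference.
 */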
if (!atomic_add_unless(&ip->i_count, -1, 1)) {
VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
zfs_rele_async_task, ip, TQ_SLEEP) != TASKQID_INVALID);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held inode reference for it.
*
* IN: zdp - znode of directory to search.
* nm - name of entry to lookup.
* flags - LOOKUP_XATTR set if looking for an attribute.
* cr - credentials of caller.
* direntflags - directory lookup flags
* realpnp - returned pathname.
*
* OUT: zpp - znode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
int
zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zdp);
int error = 0;
/*
* Fast path lookup, however we must skip DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed in name. This means
 * creating 'a' and removing 'A' on a case-insensitive
 * file system would work, but the DNLC still thinks 'a'
 * exists and won't let you create it again on the next
 * pass through the fast path.
*/
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (!error) {
*zpp = zdp;
zhold(*zpp);
return (0);
}
return (error);
}
}
if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
return (error);
*zpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
 * We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Do we have permission to get into attribute directory?
*/
if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
B_TRUE, cr, zfs_init_idmap))) {
zrele(*zpp);
*zpp = NULL;
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTDIR));
}
/*
* Check accessibility of directory.
*/
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
if ((error == 0) && (*zpp))
zfs_znode_update_vfs(*zpp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
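/*
 * Illustrative: with neither LOOKUP_XATTR nor FIGNORECASE set, a lookup
 * of "" or "." short-circuits in zfs_lookup() and returns zdp itself
 * with an extra hold, so callers must zrele() the result exactly as
 * they would for an ordinary entry.
 */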
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the ip of the created or trunc'd file.
*
* IN: dzp - znode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - file flag.
* vsecp - ACL to be set
* mnt_ns - user namespace of the mount
*
* OUT: zpp - znode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated if new entry created
* zp - ctime|mtime always, atime if new
*/
int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp,
zidmap_t *mnt_ns)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
objset_t *os;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
boolean_t skip_acl = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (name == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
top:
*zpp = NULL;
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
zhold(dzp);
zp = dzp;
dl = NULL;
error = 0;
} else {
/* possible igrab(zp) */
int zflg = 0;
if (flag & FIGNORECASE)
zflg |= ZCILOOK;
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL);
if (error) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = SET_ERROR(EISDIR);
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if (zp == NULL) {
uint64_t txtype;
uint64_t projid = ZFS_DEFAULT_PROJID;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, skip_acl, cr,
mnt_ns))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EINVAL);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids, mnt_ns)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx,
(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
/*
 * Since we failed to add the directory entry for it,
* delete the newly created dnode.
*/
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
if (flag & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
} else {
int aflags = (flag & O_APPEND) ? V_APPEND : 0;
if (have_acl)
zfs_acl_ids_free(&acl_ids);
/*
* A directory entry already exists for this name.
*/
/*
* Can't truncate an existing file if in exclusive mode.
*/
if (excl) {
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
/*
* Verify requested access to file.
*/
if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr,
mnt_ns))) {
goto out;
}
mutex_enter(&dzp->z_lock);
dzp->z_seq++;
mutex_exit(&dzp->z_lock);
/*
* Truncate regular files if requested.
*/
if (S_ISREG(ZTOI(zp)->i_mode) &&
(vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
/* we can't hold any locks when calling zfs_freesp() */
if (dl) {
zfs_dirent_unlock(dl);
dl = NULL;
}
error = zfs_freesp(zp, 0, 0, mode, TRUE);
}
}
out:
if (dl)
zfs_dirent_unlock(dl);
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp,
zidmap_t *mnt_ns)
{
(void) excl, (void) mode, (void) flag;
znode_t *zp = NULL, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
uint64_t projid = ZFS_DEFAULT_PROJID;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
os = zfsvfs->z_os;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
top:
*ipp = NULL;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids, mnt_ns)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/* Add to unlinked set */
zp->z_unlinked = B_TRUE;
zfs_unlinked_add(zp, tx);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
out:
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*ipp = ZTOI(zp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dzp - znode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime
* ip - ctime (if nlink > 0)
*/
static uint64_t null_xattr = 0;
int
zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
{
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t acl_obj, xattr_obj;
uint64_t xattr_obj_unlinked = 0;
uint64_t obj = 0;
uint64_t links;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
boolean_t may_delete_now, delete_now = FALSE;
boolean_t unlinked, toobig = FALSE;
uint64_t txtype;
pathname_t *realnmp = NULL;
pathname_t realnm;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE) {
zflg |= ZCILOOK;
pn_alloc(&realnm);
realnmp = &realnm;
}
top:
xattr_obj = 0;
xzp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, realnmp))) {
if (realnmp)
pn_free(realnmp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EPERM);
goto out;
}
mutex_enter(&zp->z_lock);
may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
!zn_has_cached_data(zp, 0, LLONG_MAX);
mutex_exit(&zp->z_lock);
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the inode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
obj = zp->z_id;
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (may_delete_now) {
toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
/* if the file is too big, only hold_free a token amount */
dmu_tx_hold_free(tx, zp->z_id, 0,
(toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
}
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
mutex_enter(&zp->z_lock);
if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
mutex_exit(&zp->z_lock);
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
goto top;
}
if (realnmp)
pn_free(realnmp);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
/*
* Hold z_lock so that we can make sure that the ACL obj
* hasn't changed. Could have been deleted due to
* zfs_sa_upgrade().
*/
mutex_enter(&zp->z_lock);
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
delete_now = may_delete_now && !toobig &&
atomic_read(&ZTOI(zp)->i_count) == 1 &&
!zn_has_cached_data(zp, 0, LLONG_MAX) &&
xattr_obj == xattr_obj_unlinked &&
zfs_external_acl(zp) == acl_obj;
VERIFY_IMPLY(xattr_obj_unlinked, xzp);
}
if (delete_now) {
if (xattr_obj_unlinked) {
ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE;
clear_nlink(ZTOI(xzp));
links = 0;
error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx);
ASSERT3U(error, ==, 0);
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
if (zp->z_is_sa)
error = sa_remove(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), tx);
else
error = sa_update(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), &null_xattr,
sizeof (uint64_t), tx);
ASSERT0(error);
}
/*
* Add to the unlinked set because a new reference could be
 * taken concurrently, resulting in a deferred destruction.
*/
zfs_unlinked_add(zp, tx);
mutex_exit(&zp->z_lock);
} else if (unlinked) {
mutex_exit(&zp->z_lock);
zfs_unlinked_add(zp, tx);
}
txtype = TX_REMOVE;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (realnmp)
pn_free(realnmp);
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
if (delete_now)
zrele(zp);
else
zfs_zrele_async(zp);
if (xzp) {
zfs_znode_update_vfs(xzp);
zfs_zrele_async(xzp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
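/*
 * Decision summary for zfs_remove() (illustrative, not authoritative):
 * if this was the last link and we hold the only reference, the file
 * isn't too big, nothing is cached, and the xattr/ACL objects didn't
 * change under us, the znode is freed in this transaction (delete_now);
 * otherwise a last link lands in the unlinked set for deferred
 * destruction, and a non-last link only loses its directory entry.
 */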
/*
* Create a new directory and insert it into dzp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dzp - znode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* flags - case flags.
* vsecp - ACL to be set
* mnt_ns - user namespace of the mount
*
* OUT: zpp - znode of created directory.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime updated
* zpp - ctime|mtime|atime updated
*/
int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
uint64_t txtype;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t waited = B_FALSE;
ASSERT(S_ISDIR(vap->va_mode));
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (dirname == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids, mnt_ns)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
 * EACCES instead of EEXIST, which can cause some applications
* to fail.
*/
top:
*zpp = NULL;
if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
NULL, NULL))) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
mnt_ns))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
/*
* Now put new name in parent dir.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
acl_ids.z_fuidp, vap);
out:
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (error != 0) {
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dzp - znode of directory to remove from.
* name - name of directory to be removed.
* cwd - inode of current working directory.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated
*/
int
zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
int flags)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
zp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
goto out;
}
if (!S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
if (zp == cwd) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* Grab a lock on the directory to make sure that no one is
* trying to add (or lookup) entries while we are removing it.
*/
rw_enter(&zp->z_name_lock, RW_WRITER);
/*
* Grab a lock on the parent pointer to make sure we play well
* with the treewalk and directory rename code.
*/
rw_enter(&zp->z_parent_lock, RW_WRITER);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
goto top;
}
dmu_tx_abort(tx);
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
B_FALSE);
}
dmu_tx_commit(tx);
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
out:
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
zrele(zp);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Read directory entries from the given directory cursor position and emit
* name and position for each entry.
*
* IN: ip - inode of directory to read.
* ctx - directory entry context.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*
 * Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
int
zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
zap_cursor_t zc;
zap_attribute_t zap;
int error;
uint8_t prefetch;
uint8_t type;
int done = 0;
uint64_t parent;
uint64_t offset; /* must be unsigned; checks for < 1 */
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0)
goto out;
/*
* Quit if directory has been removed (posix)
*/
if (zp->z_unlinked)
goto out;
error = 0;
os = zfsvfs->z_os;
offset = ctx->pos;
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Transform to file-system independent format
*/
while (!done) {
uint64_t objnum;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if (error == ENOENT)
break;
else
goto update;
}
/*
* Allow multiple entries provided the first entry is
* the object id. Non-zpl consumers may safely make
* use of the additional space.
*
* XXX: This should be a feature flag for compatibility
*/
if (zap.za_integer_length != 8 ||
zap.za_num_integers == 0) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld, "
"length = %d, num = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset,
zap.za_integer_length,
(u_longlong_t)zap.za_num_integers);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
}
done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
objnum, type);
if (done)
break;
/* Prefetch znode */
if (prefetch) {
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
ctx->pos = offset;
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
update:
zap_cursor_fini(&zc);
if (error == ENOENT)
error = 0;
out:
zfs_exit(zfsvfs, FTAG);
return (error);
}
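/*
 * Worked cookie example for zfs_readdir() (illustrative): at the root
 * of a filesystem with a visible '.zfs', ctx->pos walks 0 ('.'),
 * 1 ('..'), 2 ('.zfs'), and thereafter takes serialized ZAP cursor
 * values. Because the low 4 bits of real ZAP cookies are zero, the
 * special offsets can never collide with a real entry's cookie.
 */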
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
* for most of the attributes. However, the znode currently has the
* authoritative atime, blksize, and block count.
*
* IN: ip - inode of file.
*
* OUT: sp - kstat values.
*
* RETURN: 0 (always succeeds)
*/
int
#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
zfs_getattr_fast(zidmap_t *user_ns, u32 request_mask, struct inode *ip,
struct kstat *sp)
#else
zfs_getattr_fast(zidmap_t *user_ns, struct inode *ip, struct kstat *sp)
#endif
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t blksize;
u_longlong_t nblocks;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
mutex_enter(&zp->z_lock);
#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
zpl_generic_fillattr(user_ns, request_mask, ip, sp);
#else
zpl_generic_fillattr(user_ns, ip, sp);
#endif
/*
* +1 link count for root inode with visible '.zfs' directory.
*/
if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
if (sp->nlink < ZFS_LINK_MAX)
sp->nlink++;
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
sp->blksize = blksize;
sp->blocks = nblocks;
if (unlikely(zp->z_blksz == 0)) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
sp->blksize = zfsvfs->z_max_blksz;
}
mutex_exit(&zp->z_lock);
/*
* Required to prevent NFS client from detecting different inode
* numbers of snapshot root dentry before and after snapshot mount.
*/
if (zfsvfs->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
dmu_objset_id(zfsvfs->z_os);
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
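/*
 * Illustrative: for a mounted snapshot whose objset id is N, the root
 * dentry above reports ino = ZFSCTL_INO_SNAPDIRS - N, which is assumed
 * to match the inode number the .zfs/snapshot directory exposed before
 * the mount, keeping NFS clients from seeing the number change.
 */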
/*
* For the operation of changing file's user/group/project, we need to
* handle not only the main object that is assigned to the file directly,
* but also the ones that are used by the file via hidden xattr directory.
*
 * Because the xattr directory may contain many EA entries, it may be
 * impossible to change all of them within the single transaction that
 * changes the main object's user/group/project attributes. Instead, we
 * change them one by one via multiple independent transactions. This
 * may not be a good solution, but we have no better idea yet.
*/
static int
zfs_setattr_dir(znode_t *dzp)
{
struct inode *dxip = ZTOI(dzp);
struct inode *xip = NULL;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
objset_t *os = zfsvfs->z_os;
zap_cursor_t zc;
zap_attribute_t zap;
zfs_dirlock_t *dl;
znode_t *zp = NULL;
dmu_tx_t *tx = NULL;
uint64_t uid, gid;
sa_bulk_attr_t bulk[4];
int count;
int err;
zap_cursor_init(&zc, os, dzp->z_id);
while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
count = 0;
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
err = ENXIO;
break;
}
err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
ZEXISTS, NULL, NULL);
if (err == ENOENT)
goto next;
if (err)
break;
xip = ZTOI(zp);
if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
zp->z_projid == dzp->z_projid)
goto next;
tx = dmu_tx_create(os);
if (!(zp->z_pflags & ZFS_PROJID))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
break;
mutex_enter(&dzp->z_lock);
if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
xip->i_uid = dxip->i_uid;
uid = zfs_uid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, sizeof (uid));
}
if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
xip->i_gid = dxip->i_gid;
gid = zfs_gid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, sizeof (gid));
}
if (zp->z_projid != dzp->z_projid) {
if (!(zp->z_pflags & ZFS_PROJID)) {
zp->z_pflags |= ZFS_PROJID;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
sizeof (zp->z_pflags));
}
zp->z_projid = dzp->z_projid;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
NULL, &zp->z_projid, sizeof (zp->z_projid));
}
mutex_exit(&dzp->z_lock);
if (likely(count > 0)) {
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
tx = NULL;
if (err != 0 && err != ENOENT)
break;
next:
if (zp) {
zrele(zp);
zp = NULL;
zfs_dirent_unlock(dl);
}
zap_cursor_advance(&zc);
}
if (tx)
dmu_tx_abort(tx);
if (zp) {
zrele(zp);
zfs_dirent_unlock(dl);
}
zap_cursor_fini(&zc);
return (err == ENOENT ? 0 : err);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If ATTR_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
* mnt_ns - user namespace of the mount
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime updated, mtime updated if size changed.
*/
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
{
struct inode *ip;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
- objset_t *os = zfsvfs->z_os;
+ objset_t *os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t *tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2], atime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2 = 0;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
boolean_t handle_eadir = B_FALSE;
sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0, bulks = 8;
if (mask == 0)
return (0);
if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (err);
ip = ZTOI(zp);
+ os = zfsvfs->z_os;
/*
* If this is a xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
if (xoap != NULL && (mask & ATTR_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOTSUP));
}
}
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & ATTR_XVATTR))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
/*
* Immutable files can only alter immutable bit and atime
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
err = SET_ERROR(EPERM);
goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
err = SET_ERROR(EPERM);
goto out3;
}
/*
 * Verify the timestamps don't overflow 32 bits.
 * ZFS can handle large timestamps, but 32-bit syscalls can't
 * handle times past January 2038. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) &&
TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) &&
TIMESPEC_OVERFLOW(&vap->va_mtime))) {
err = SET_ERROR(EOVERFLOW);
goto out3;
}
}
top:
attrzp = NULL;
aclp = NULL;
/* Can this be moved to before the top label? */
if (zfs_is_readonly(zfsvfs)) {
err = SET_ERROR(EROFS);
goto out3;
}
/*
* First validate permissions
*/
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr,
mnt_ns);
if (err)
goto out3;
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err)
goto out3;
}
if (mask & (ATTR_ATIME|ATTR_MTIME) ||
((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr, mnt_ns);
}
if (mask & (ATTR_UID|ATTR_GID)) {
int idmask = (mask & (ATTR_UID|ATTR_GID));
int take_owner;
int take_group;
uid_t uid;
gid_t gid;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & ATTR_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
uid = zfs_uid_to_vfsuid(mnt_ns, zfs_i_user_ns(ip),
vap->va_uid);
gid = zfs_gid_to_vfsgid(mnt_ns, zfs_i_user_ns(ip),
vap->va_gid);
take_owner = (mask & ATTR_UID) && (uid == crgetuid(cr));
take_group = (mask & ATTR_GID) &&
zfs_groupmember(zfsvfs, gid, cr);
/*
* If both ATTR_UID and ATTR_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (ATTR_UID|ATTR_GID)) &&
take_owner && take_group) ||
((idmask == ATTR_UID) && take_owner) ||
((idmask == ATTR_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr, mnt_ns) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
(void) secpolicy_setid_clear(vap, cr);
trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & ATTR_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
 * The bits will be restored prior to actually setting
* the attributes so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((!S_ISREG(ip->i_mode) &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
err = SET_ERROR(EPERM);
goto out3;
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
mutex_exit(&zp->z_lock);
if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
mnt_ns) == 0) {
err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr, mnt_ns, zfs_i_user_ns(ip));
if (err)
goto out3;
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and user
* has the ability to modify mode. In that case remove
 * UID|GID and/or MODE from the mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
}
err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
zfs_zaccess_unix, zp);
if (err)
goto out3;
if (trim_mask)
vap->va_mask |= saved_mask;
}
/*
 * secpolicy_vnode_setattr() or the take-ownership path may
 * have changed va_mask.
*/
mask = vap->va_mask;
if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
handle_eadir = B_TRUE;
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
if (err)
goto out2;
}
if (mask & ATTR_UID) {
new_kuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_kuid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & ATTR_GID) {
new_kgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_kgid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
zrele(attrzp);
err = EDQUOT;
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = EPERM;
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
mutex_enter(&zp->z_lock);
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & ATTR_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
 * For an existing object upgraded from an old system, the on-disk
 * layout has no slot for the project ID attribute. But the quota
 * accounting logic needs to access related slots by offset directly.
 * So we need to adjust such old objects' layout to move the project
 * ID to a unified and fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&attrzp->z_acl_lock);
mutex_enter(&attrzp->z_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (ATTR_UID|ATTR_GID)) {
if (mask & ATTR_UID) {
ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
new_uid = zfs_uid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
}
}
if (mask & ATTR_GID) {
ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
new_gid = zfs_gid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
}
}
if (!(mask & ATTR_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT(err == 0);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
ASSERT(err == 0);
}
}
if (mask & ATTR_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = ZTOI(zp)->i_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
zp->z_atime_dirty = B_FALSE;
- ZFS_TIME_ENCODE(&ip->i_atime, atime);
+ inode_timespec_t tmp_atime = zpl_inode_get_atime(ip);
+ ZFS_TIME_ENCODE(&tmp_atime, atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, sizeof (atime));
}
if (mask & (ATTR_MTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
- ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate(
- vap->va_mtime, ZTOI(zp));
+ zpl_inode_set_mtime_to_ts(ZTOI(zp),
+ zpl_inode_timestamp_truncate(vap->va_mtime, ZTOI(zp)));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (mask & (ATTR_CTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
zpl_inode_set_ctime_to_ts(ZTOI(zp),
zpl_inode_timestamp_truncate(vap->va_ctime, ZTOI(zp)));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
if (attrzp && mask) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
sizeof (ctime));
}
/*
* Do this after setting timestamps to prevent timestamp
* update from toggling bit
*/
if (xoap && (mask & ATTR_XVATTR)) {
/*
* restore trimmed off masks
* so that return masks can be set for caller.
*/
if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT(S_ISREG(ip->i_mode));
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
mutex_exit(&zp->z_lock);
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&attrzp->z_acl_lock);
mutex_exit(&attrzp->z_lock);
}
out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT(err2 == 0);
}
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
if (attrzp)
zrele(attrzp);
if (err == ERESTART)
goto top;
} else {
if (count > 0)
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
if (attrzp) {
if (err2 == 0 && handle_eadir)
err = zfs_setattr_dir(attrzp);
zrele(attrzp);
}
zfs_znode_update_vfs(zp);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(tmpxvattr, sizeof (xvattr_t));
zfs_exit(zfsvfs, FTAG);
return (err);
}
typedef struct zfs_zlock {
krwlock_t *zl_rwlock; /* lock we acquired */
znode_t *zl_znode; /* znode we held */
struct zfs_zlock *zl_next; /* next in list */
} zfs_zlock_t;
/*
* Drop locks and release vnodes that were held by zfs_rename_lock().
*/
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
while ((zl = *zlpp) != NULL) {
if (zl->zl_znode != NULL)
zfs_zrele_async(zl->zl_znode);
rw_exit(zl->zl_rwlock);
*zlpp = zl->zl_next;
kmem_free(zl, sizeof (*zl));
}
}
/*
* Search back through the directory tree, using the ".." entries.
* Lock each directory in the chain to prevent concurrent renames.
* Fail any attempt to move a directory into one of its own descendants.
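* For example, a request to move /usr/a/b to /usr/a/b/c/d must fail:
* walking the ".." chain up from the target directory reaches b (the
* source) before the root, so the source would become its own ancestor.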
* XXX - z_parent_lock can overlap with map or grow locks
*/
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
znode_t *zp = tdzp;
uint64_t rootid = ZTOZSB(zp)->z_root;
uint64_t oidp = zp->z_id;
krwlock_t *rwlp = &szp->z_parent_lock;
krw_t rw = RW_WRITER;
/*
* First pass write-locks szp and compares to zp->z_id.
* Later passes read-lock zp and compare to zp->z_parent.
*/
do {
if (!rw_tryenter(rwlp, rw)) {
/*
* Another thread is renaming in this path.
* Note that if we are a WRITER, we don't have any
* parent_locks held yet.
*/
if (rw == RW_READER && zp->z_id > szp->z_id) {
/*
* Drop our locks and restart
*/
zfs_rename_unlock(&zl);
*zlpp = NULL;
zp = tdzp;
oidp = zp->z_id;
rwlp = &szp->z_parent_lock;
rw = RW_WRITER;
continue;
} else {
/*
* Wait for other thread to drop its locks
*/
rw_enter(rwlp, rw);
}
}
zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
zl->zl_rwlock = rwlp;
zl->zl_znode = NULL;
zl->zl_next = *zlpp;
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
if (rw == RW_READER) { /* i.e. not the first pass */
int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
if (error)
return (error);
zl->zl_znode = zp;
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
&oidp, sizeof (oidp));
rwlp = &zp->z_parent_lock;
rw = RW_READER;
} while (zp->z_id != sdzp->z_id);
return (0);
}
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdzp - Source directory containing the "old entry".
* snm - Old entry name.
* tdzp - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
* flags - case flags
* rflags - RENAME_* flags
* wo_vap - attributes for RENAME_WHITEOUT (must be a char device 0:0).
* mnt_ns - user namespace of the mount
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdzp,tdzp - ctime|mtime updated
*/
int
zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
{
znode_t *szp, *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(sdzp);
zilog_t *zilog;
zfs_dirlock_t *sdl, *tdl;
dmu_tx_t *tx;
zfs_zlock_t *zl;
int cmp, serr, terr;
int error = 0;
int zflg = 0;
boolean_t waited = B_FALSE;
/* Needed for whiteout inode creation. */
boolean_t fuid_dirtied;
zfs_acl_ids_t acl_ids;
boolean_t have_acl = B_FALSE;
znode_t *wzp = NULL;
if (snm == NULL || tnm == NULL)
return (SET_ERROR(EINVAL));
if (rflags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return (SET_ERROR(EINVAL));
/* Already checked by Linux VFS, but just to make sure. */
if (rflags & RENAME_EXCHANGE &&
(rflags & (RENAME_NOREPLACE | RENAME_WHITEOUT)))
return (SET_ERROR(EINVAL));
/*
* Make sure we get wo_vap iff RENAME_WHITEOUT, and that it's the
* right kind of vattr_t for the whiteout file. These are set
* internally by ZFS so should never be incorrect.
*/
VERIFY_EQUIV(rflags & RENAME_WHITEOUT, wo_vap != NULL);
VERIFY_IMPLY(wo_vap, wo_vap->va_mode == S_IFCHR);
VERIFY_IMPLY(wo_vap, wo_vap->va_rdev == makedevice(0, 0));
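/*
 * (VERIFY_EQUIV(a, b) asserts that a and b have the same truth value;
 * VERIFY_IMPLY(a, b) asserts that b holds whenever a does.)
 */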
if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if ((error = zfs_verify_zp(tdzp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
zfsctl_is_node(ZTOI(tdzp))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
szp = NULL;
tzp = NULL;
zl = NULL;
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into/out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Lock source and target directory entries. To prevent deadlock,
* a lock ordering must be defined. We lock the directory with
* the smallest object id first, or if it's a tie, the one with
* the lexically first name.
*/
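/*
 * For example, when sdzp and tdzp are the same directory, the entry
 * names themselves ("snm" vs. "tnm") break the tie, so two concurrent
 * renames between the same pair of entries always take the dirent
 * locks in the same order and cannot deadlock.
 */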
if (sdzp->z_id < tdzp->z_id) {
cmp = -1;
} else if (sdzp->z_id > tdzp->z_id) {
cmp = 1;
} else {
/*
* First compare the two name arguments without
* considering any case folding.
*/
int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
ASSERT(error == 0 || !zfsvfs->z_utf8);
if (cmp == 0) {
/*
* POSIX: "If the old argument and the new argument
* both refer to links to the same existing file,
* the rename() function shall return successfully
* and perform no other action."
*/
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* If the file system is case-folding, then we may
* have some more checking to do. A case-folding file
* system is either supporting mixed case sensitivity
* access or is completely case-insensitive. Note
* that the file system is always case preserving.
*
* In mixed sensitivity mode case sensitive behavior
* is the default. FIGNORECASE must be used to
* explicitly request case insensitive behavior.
*
* If the source and target names provided differ only
* by case (e.g., a request to rename 'tim' to 'Tim'),
* we will treat this as a special case in the
* case-insensitive mode: as long as the source name
* is an exact match, we will allow this to proceed as
* a name-change request.
*/
if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
(zfsvfs->z_case == ZFS_CASE_MIXED &&
flags & FIGNORECASE)) &&
u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
&error) == 0) {
/*
* case preserving rename request, require exact
* name matches
*/
zflg |= ZCIEXACT;
zflg &= ~ZCILOOK;
}
}
/*
* If the source and destination directories are the same, we should
* grab the z_name_lock of that directory only once.
*/
if (sdzp == tdzp) {
zflg |= ZHAVELOCK;
rw_enter(&sdzp->z_name_lock, RW_READER);
}
if (cmp < 0) {
serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
ZEXISTS | zflg, NULL, NULL);
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
} else {
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, zflg, NULL, NULL);
serr = zfs_dirent_lock(&sdl,
sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
NULL, NULL);
}
if (serr) {
/*
* Source entry invalid or not there.
*/
if (!terr) {
zfs_dirent_unlock(tdl);
if (tzp)
zrele(tzp);
}
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(snm, "..") == 0)
serr = EINVAL;
zfs_exit(zfsvfs, FTAG);
return (serr);
}
if (terr) {
zfs_dirent_unlock(sdl);
zrele(szp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
zfs_exit(zfsvfs, FTAG);
return (terr);
}
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In
* such a case, we only allow renames into our tree when the project
* IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr, mnt_ns)))
goto out;
if (S_ISDIR(ZTOI(szp)->i_mode)) {
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
if (rflags & RENAME_NOREPLACE) {
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Source and target must be the same type (unless exchanging).
*/
if (!(rflags & RENAME_EXCHANGE)) {
boolean_t s_is_dir = S_ISDIR(ZTOI(szp)->i_mode) != 0;
boolean_t t_is_dir = S_ISDIR(ZTOI(tzp)->i_mode) != 0;
if (s_is_dir != t_is_dir) {
error = SET_ERROR(s_is_dir ? ENOTDIR : EISDIR);
goto out;
}
}
/*
* POSIX dictates that when the source and target
* entries refer to the same file object, rename
* must do nothing and exit without error.
*/
if (szp->z_id == tzp->z_id) {
error = 0;
goto out;
}
} else if (rflags & RENAME_EXCHANGE) {
/* Target must exist for RENAME_EXCHANGE. */
error = SET_ERROR(ENOENT);
goto out;
}
/* Set up inode creation for RENAME_WHITEOUT. */
if (rflags & RENAME_WHITEOUT) {
/*
* Whiteout files are not regular files or directories, so to
* match zfs_create() we do not inherit the project id.
*/
uint64_t wo_projid = ZFS_DEFAULT_PROJID;
error = zfs_zaccess(sdzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns);
if (error)
goto out;
if (!have_acl) {
error = zfs_acl_ids_create(sdzp, 0, wo_vap, cr, NULL,
&acl_ids, mnt_ns);
if (error)
goto out;
have_acl = B_TRUE;
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, wo_projid)) {
error = SET_ERROR(EDQUOT);
goto out;
}
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id,
(rflags & RENAME_EXCHANGE) ? TRUE : FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
if (rflags & RENAME_WHITEOUT) {
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_zap(tx, sdzp->z_id, TRUE, snm);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
goto top;
}
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Unlink the source.
*/
szp->z_pflags |= ZFS_AV_MODIFIED;
if (tdzp->z_pflags & ZFS_PROJINHERIT)
szp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
VERIFY0(error);
error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
if (error)
goto commit;
/*
* Unlink the target.
*/
if (tzp) {
int tzflg = zflg;
if (rflags & RENAME_EXCHANGE) {
/* This inode will be re-linked soon. */
tzflg |= ZRENAMING;
tzp->z_pflags |= ZFS_AV_MODIFIED;
if (sdzp->z_pflags & ZFS_PROJINHERIT)
tzp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(tzp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&tzp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
}
error = zfs_link_destroy(tdl, tzp, tx, tzflg, NULL);
if (error)
goto commit_link_szp;
}
/*
* Create the new target links:
* * We always link the target.
* * RENAME_EXCHANGE: Link the old target to the source.
* * RENAME_WHITEOUT: Create a whiteout inode in place of the source.
*/
error = zfs_link_create(tdl, szp, tx, ZRENAMING);
if (error) {
/*
* If we have removed the existing target, a subsequent call to
* zfs_link_create() to add back the same entry, but with a new
* dnode (szp), should not fail.
*/
ASSERT3P(tzp, ==, NULL);
goto commit_link_tzp;
}
switch (rflags & (RENAME_EXCHANGE | RENAME_WHITEOUT)) {
case RENAME_EXCHANGE:
error = zfs_link_create(sdl, tzp, tx, ZRENAMING);
/*
* The same argument as zfs_link_create() failing for
* szp applies here, since the source directory must
* have had an entry we are replacing.
*/
ASSERT0(error);
if (error)
goto commit_unlink_td_szp;
break;
case RENAME_WHITEOUT:
zfs_mknode(sdzp, wo_vap, tx, cr, 0, &wzp, &acl_ids);
error = zfs_link_create(sdl, wzp, tx, ZNEW);
if (error) {
zfs_znode_delete(wzp, tx);
remove_inode_hash(ZTOI(wzp));
goto commit_unlink_td_szp;
}
break;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
switch (rflags & (RENAME_EXCHANGE | RENAME_WHITEOUT)) {
case RENAME_EXCHANGE:
zfs_log_rename_exchange(zilog, tx,
(flags & FIGNORECASE ? TX_CI : 0), sdzp, sdl->dl_name,
tdzp, tdl->dl_name, szp);
break;
case RENAME_WHITEOUT:
zfs_log_rename_whiteout(zilog, tx,
(flags & FIGNORECASE ? TX_CI : 0), sdzp, sdl->dl_name,
tdzp, tdl->dl_name, szp, wzp);
break;
default:
ASSERT0(rflags & ~RENAME_NOREPLACE);
zfs_log_rename(zilog, tx, (flags & FIGNORECASE ? TX_CI : 0),
sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp);
break;
}
commit:
dmu_tx_commit(tx);
out:
if (have_acl)
zfs_acl_ids_free(&acl_ids);
zfs_znode_update_vfs(sdzp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (sdzp != tdzp)
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
zrele(szp);
if (wzp) {
zfs_znode_update_vfs(wzp);
zrele(wzp);
}
if (tzp) {
zfs_znode_update_vfs(tzp);
zrele(tzp);
}
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
/*
* Clean-up path for broken link state.
*
* At this point we are in a (very) bad state, so we need to do our
* best to correct the state. In particular, all of the nlinks are
* wrong because we were destroying and creating links with ZRENAMING.
*
* In some form, all of these operations have to resolve the state:
*
* * link_destroy() *must* succeed. Fortunately, this is very likely
* since we only just created it.
*
* * link_create()s are allowed to fail (though they shouldn't because
* we only just unlinked them and are putting the entries back
* during clean-up). But if they fail, we can just forcefully drop
* the nlink value to (at the very least) avoid broken nlink values
* -- though in the case of non-empty directories we will have to
* panic (otherwise we'd have a leaked directory with a broken ..).
*/
commit_unlink_td_szp:
VERIFY0(zfs_link_destroy(tdl, szp, tx, ZRENAMING, NULL));
commit_link_tzp:
if (tzp) {
if (zfs_link_create(tdl, tzp, tx, ZRENAMING))
VERIFY0(zfs_drop_nlink(tzp, tx, NULL));
}
commit_link_szp:
if (zfs_link_create(sdl, szp, tx, ZRENAMING))
VERIFY0(zfs_drop_nlink(szp, tx, NULL));
goto commit;
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dzp - Directory to contain new symbolic link.
* name - Name of directory entry in dip.
* vap - Attributes of new entry.
* link - Name for new symlink entry.
* cr - credentials of caller.
* flags - case flags
* mnt_ns - user namespace of the mount
*
* OUT: zpp - Znode for new symbolic link.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
*/
int
zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
{
znode_t *zp;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
int zflg = ZNEW;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
boolean_t waited = B_FALSE;
ASSERT(S_ISLNK(vap->va_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids, mnt_ns)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
top:
*zpp = NULL;
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
if (error) {
zfs_acl_ids_free(&acl_ids);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Create a new object for the symlink.
* For version 4 ZPL datasets, the symlink will be an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
link, len, tx);
else
zfs_sa_symlink(zp, link, len, tx);
mutex_exit(&zp->z_lock);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (error == 0) {
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
} else {
zrele(zp);
}
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by ip.
*
* IN: ip - inode of symbolic link
* uio - structure to contain the link path.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*/
int
zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
{
(void) cr;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
mutex_exit(&zp->z_lock);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Insert a new entry into directory tdzp referencing szp.
*
* IN: tdzp - Directory to contain new entry.
* szp - znode of new entry.
* name - name of new entry.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* tdzp - ctime|mtime updated
* szp - ctime updated
*/
int
zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
int flags)
{
struct inode *sip = ZTOI(szp);
znode_t *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(tdzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uint64_t parent;
uid_t owner;
boolean_t waited = B_FALSE;
boolean_t is_tmpfile = 0;
uint64_t txg;
#ifdef HAVE_TMPFILE
is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
return (error);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (S_ISDIR(sip->i_mode)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_verify_zp(szp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In
* such a case, we only allow hard link creation in our tree when the
* project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr,
zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
top:
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
if (error) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
if (is_tmpfile)
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/* unmark z_unlinked so zfs_link_create() will not reject it */
if (is_tmpfile)
szp->z_unlinked = B_FALSE;
error = zfs_link_create(dl, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
/*
* tmpfile is created to be in z_unlinkedobj, so remove it.
* Also, we don't log in ZIL, because all previous file
* operations on the tmpfile are ignored by ZIL. Instead we
* always wait for the txg to sync to make sure all previous
* operations are sync safe.
*/
if (is_tmpfile) {
VERIFY(zap_remove_int(zfsvfs->z_os,
zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
} else if (is_tmpfile) {
/* restore z_unlinked since linking failed */
szp->z_unlinked = B_TRUE;
}
txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED)
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
static void
zfs_putpage_sync_commit_cb(void *arg)
{
struct page *pp = arg;
ClearPageError(pp);
end_page_writeback(pp);
}
static void
zfs_putpage_async_commit_cb(void *arg)
{
struct page *pp = arg;
znode_t *zp = ITOZ(pp->mapping->host);
ClearPageError(pp);
end_page_writeback(pp);
atomic_dec_32(&zp->z_async_writes_cnt);
}
/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
* IN: ip - page mapped for inode.
* pp - page to push (page is locked)
* wbc - writeback control data
* for_sync - does the caller intend to wait synchronously for the
* page writeback to complete?
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated
*/
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
boolean_t for_sync)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
loff_t offset;
loff_t pgoff;
unsigned int pglen;
dmu_tx_t *tx;
caddr_t va;
int err = 0;
uint64_t mtime[2], ctime[2];
- inode_timespec_t tmp_ctime;
+ inode_timespec_t tmp_ts;
sa_bulk_attr_t bulk[3];
int cnt = 0;
struct address_space *mapping;
if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (err);
ASSERT(PageLocked(pp));
pgoff = page_offset(pp); /* Page byte-offset in file */
offset = i_size_read(ip); /* File length in bytes */
pglen = MIN(PAGE_SIZE, /* Page length in bytes */
P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
/* Page is beyond end of file */
if (pgoff >= offset) {
unlock_page(pp);
zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Truncate page length to end of file */
if (pgoff + pglen > offset)
pglen = offset - pgoff;
#if 0
/*
* FIXME: Allow mmap writes past its quota. The correct fix
* is to register a page_mkwrite() handler to count the page
* against its quota when it is about to be dirtied.
*/
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
KGID_TO_SGID(ip->i_gid)) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
err = EDQUOT;
}
#endif
/*
* The ordering here is critical and must adhere to the following
* rules in order to avoid deadlocking in either zfs_read() or
* zfs_free_range() due to a lock inversion.
*
* 1) The page must be unlocked prior to acquiring the range lock.
* This is critical because zfs_read() calls find_lock_page()
* which may block on the page lock while holding the range lock.
*
* 2) Before setting or clearing write back on a page the range lock
* must be held in order to prevent a lock inversion with the
* zfs_free_range() function.
*
* This presents a problem because upon entering this function the
* page lock is already held. To safely acquire the range lock the
* page lock must be dropped. This creates a window where another
* process could truncate, invalidate, dirty, or write out the page.
*
* Therefore, after successfully reacquiring the range and page locks
* the current page state is checked. In the common case everything
* will be as is expected and it can be written out. However, if
* the page state has changed it must be handled accordingly.
*/
mapping = pp->mapping;
redirty_page_for_writepage(wbc, pp);
unlock_page(pp);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
pgoff, pglen, RL_WRITER);
lock_page(pp);
/* Page mapping changed or it was no longer dirty, we're done */
if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
unlock_page(pp);
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Another process started writeback; block if required */
if (PageWriteback(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Speed up any non-sync page writebacks since
* they may take several seconds to complete.
* Refer to the comment in zpl_fsync() (when
* HAVE_FSYNC_RANGE is defined) for details.
*/
if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
zil_commit(zfsvfs->z_log, zp->z_id);
}
if (PageWriteback(pp))
#ifdef HAVE_PAGEMAP_FOLIO_WAIT_BIT
folio_wait_bit(page_folio(pp), PG_writeback);
#else
wait_on_page_bit(pp, PG_writeback);
#endif
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
/* Clear the dirty flag while the required locks are held */
if (!clear_page_dirty_for_io(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Counterpart for redirty_page_for_writepage() above. This page
* was in fact not skipped and should not be counted as if it were.
*/
wbc->pages_skipped--;
if (!for_sync)
atomic_inc_32(&zp->z_async_writes_cnt);
set_page_writeback(pp);
unlock_page(pp);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT);
if (err != 0) {
if (err == ERESTART)
dmu_tx_wait(tx);
dmu_tx_abort(tx);
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
filemap_dirty_folio(page_mapping(pp), page_folio(pp));
#else
__set_page_dirty_nobuffers(pp);
#endif
ClearPageError(pp);
end_page_writeback(pp);
if (!for_sync)
atomic_dec_32(&zp->z_async_writes_cnt);
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (err);
}
va = kmap(pp);
ASSERT3U(pglen, <=, PAGE_SIZE);
dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
kunmap(pp);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/* Preserve the mtime and ctime provided by the inode */
- ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
- tmp_ctime = zpl_inode_get_ctime(ip);
- ZFS_TIME_ENCODE(&tmp_ctime, ctime);
+ tmp_ts = zpl_inode_get_mtime(ip);
+ ZFS_TIME_ENCODE(&tmp_ts, mtime);
+ tmp_ts = zpl_inode_get_ctime(ip);
+ ZFS_TIME_ENCODE(&tmp_ts, ctime);
zp->z_atime_dirty = B_FALSE;
zp->z_seq++;
err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
for_sync ? zfs_putpage_sync_commit_cb :
zfs_putpage_async_commit_cb, pp);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Note that this is rarely called under writepages(), because
* writepages() normally handles the entire commit for
* performance reasons.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
} else if (!for_sync && atomic_load_32(&zp->z_sync_writes_cnt) > 0) {
/*
* If the caller does not intend to wait synchronously
* for this page writeback to complete and there are active
* synchronous calls on this file, do a commit so that
* the latter don't accidentally end up waiting for
* our writeback to complete. Refer to the comment in
* zpl_fsync() (when HAVE_FSYNC_RANGE is defined) for details.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
}
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);
zfs_exit(zfsvfs, FTAG);
return (err);
}
/*
* Update the system attributes when the inode has been dirtied. For the
* moment we only update the mode, atime, mtime, and ctime.
*/
int
zfs_dirty_inode(struct inode *ip, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
dmu_tx_t *tx;
uint64_t mode, atime[2], mtime[2], ctime[2];
- inode_timespec_t tmp_ctime;
+ inode_timespec_t tmp_ts;
sa_bulk_attr_t bulk[4];
int error = 0;
int cnt = 0;
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return (0);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
#ifdef I_DIRTY_TIME
/*
* This implements the lazytime semantic introduced in Linux 4.0.
* This flag is only passed in from update_time() when lazytime is set.
* (Note, I_DIRTY_SYNC will also be set if lazytime is not enabled.)
* Fortunately mtime and ctime are managed within ZFS itself, so we
* only need to dirty atime.
*/
if (flags == I_DIRTY_TIME) {
zp->z_atime_dirty = B_TRUE;
goto out;
}
#endif
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
mutex_enter(&zp->z_lock);
zp->z_atime_dirty = B_FALSE;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
/* Preserve the mode, mtime and ctime provided by the inode */
- ZFS_TIME_ENCODE(&ip->i_atime, atime);
- ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
- tmp_ctime = zpl_inode_get_ctime(ip);
- ZFS_TIME_ENCODE(&tmp_ctime, ctime);
+ tmp_ts = zpl_inode_get_atime(ip);
+ ZFS_TIME_ENCODE(&tmp_ts, atime);
+ tmp_ts = zpl_inode_get_mtime(ip);
+ ZFS_TIME_ENCODE(&tmp_ts, mtime);
+ tmp_ts = zpl_inode_get_ctime(ip);
+ ZFS_TIME_ENCODE(&tmp_ts, ctime);
mode = ip->i_mode;
zp->z_mode = mode;
error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
out:
zfs_exit(zfsvfs, FTAG);
return (error);
}
void
zfs_inactive(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t atime[2];
int error;
int need_unlock = 0;
/* Only read lock if we haven't already write locked, e.g. rollback */
if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
need_unlock = 1;
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
}
if (zp->z_sa_hdl == NULL) {
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
return;
}
if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
- ZFS_TIME_ENCODE(&ip->i_atime, atime);
+ inode_timespec_t tmp_atime;
+ tmp_atime = zpl_inode_get_atime(ip);
+ ZFS_TIME_ENCODE(&tmp_atime, atime);
mutex_enter(&zp->z_lock);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&atime, sizeof (atime), tx);
zp->z_atime_dirty = B_FALSE;
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
}
}
zfs_zinactive(zp);
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
* Fill pages with data from the disk.
*/
static int
zfs_fillpage(struct inode *ip, struct page *pp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
loff_t i_size = i_size_read(ip);
u_offset_t io_off = page_offset(pp);
size_t io_len = PAGE_SIZE;
ASSERT3U(io_off, <, i_size);
if (io_off + io_len > i_size)
io_len = i_size - io_off;
void *va = kmap(pp);
int error = dmu_read(zfsvfs->z_os, ITOZ(ip)->z_id, io_off,
io_len, va, DMU_READ_PREFETCH);
if (io_len != PAGE_SIZE)
memset((char *)va + io_len, 0, PAGE_SIZE - io_len);
kunmap(pp);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
SetPageError(pp);
ClearPageUptodate(pp);
} else {
ClearPageError(pp);
SetPageUptodate(pp);
}
return (error);
}
/*
* Uses zfs_fillpage() to read data from the file and fill the page.
*
* IN: ip - inode of file to get data from.
* pp - page to read
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
int
zfs_getpage(struct inode *ip, struct page *pp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
znode_t *zp = ITOZ(ip);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_fillpage(ip, pp);
if (error == 0)
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, PAGE_SIZE);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Check ZFS-specific permissions to memory map a section of a file.
*
* IN: ip - inode of the file to mmap
* off - file offset
* addrp - start address in memory region
* len - length of memory region
* vm_flags - address flags
*
* RETURN: 0 if success
* error code if failure
*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
unsigned long vm_flags)
{
(void) addrp;
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if ((vm_flags & VM_WRITE) && (vm_flags & VM_SHARED) &&
(zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
if (off < 0 || len > MAXOFFSET_T - off) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENXIO));
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
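/*
 * For example, a PROT_WRITE/MAP_SHARED mapping of a file with
 * ZFS_IMMUTABLE, ZFS_READONLY, or ZFS_APPENDONLY set is rejected above
 * with EPERM, and a readable or executable mapping of a
 * ZFS_AV_QUARANTINED file is rejected with EACCES.
 */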
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: zp - znode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* zp - ctime|mtime updated
*/
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
(void) offset;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (cmd != F_FREESP) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr,
zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOSPC));
}
if ((error = zfs_verify_zp(zp)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
gen = (uint32_t)gen64;
size = SHORT_FID_LEN;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
zfs_exit(zfsvfs, FTAG);
return (0);
}
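/*
 * The object and generation numbers above are packed one byte at a
 * time, least-significant byte first, so a consumer decodes them by
 * reversing the shifts, e.g.:
 *
 *	uint64_t object = 0;
 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
 *		object |= (uint64_t)zfid->zf_object[i] << (8 * i);
 */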
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);
/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
-
-/* CSTYLED */
-module_param(zfs_bclone_enabled, uint, 0644);
-MODULE_PARM_DESC(zfs_bclone_enabled, "Enable block cloning");
-
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index f71026da83cb..b99df188c64b 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -1,2364 +1,2371 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* Functions needed for userland (i.e., libzpool) are not put under
* #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
/*
* This is used by the test suite so that it can delay znodes from being
* freed in order to inspect the unlinked set.
*/
static int zfs_unlink_suspend_progress = 0;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
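/*
 * For example, an RL_APPEND request becomes an RL_WRITER lock starting
 * at the current z_size, and any write that would require growing
 * z_blksz ends up locking the entire file range (offset 0, length
 * UINT64_MAX).
 */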
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_t *zp = buf;
inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_t *zp = buf;
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zh->zh_refcount = 0;
return (0);
}
static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock);
}
void
zfs_znode_init(void)
{
/*
* Initialize the znode cache. The KMC_SLAB hint is used so that the
* cache is backed by kmalloc() on the Linux slab, so that any
* wait_on_bit() operations on the related inode operate properly.
*/
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
ASSERT(znode_hold_cache == NULL);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
if (znode_hold_cache)
kmem_cache_destroy(znode_hold_cache);
znode_hold_cache = NULL;
}
/*
* The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
* serialize access to a znode and its SA buffer while the object is being
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
* with an array of mutexes and AVL trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* into the correct AVL tree and finally the per-object lock is held. In
* zfs_znode_hold_exit() the process is reversed. The per-object lock is
* released, removed from the AVL tree and destroyed if there are no waiters.
*
* This scheme has two important properties:
*
* 1) No memory allocations are performed while holding one of the z_hold_locks.
* This ensures evict(), which can be called from direct memory reclaim, will
* never block waiting on a z_hold_lock which just happens to have hashed
* to the same index.
*
* 2) All locks used to serialize access to an object are per-object and never
* shared. This minimizes lock contention without creating a large number
* of dedicated locks.
*
* On the downside it does require znode_hold_t structures to be frequently
* allocated and freed. However, because these are backed by a kmem cache
* and very short lived this cost is minimal.
*/
int
zfs_znode_hold_compare(const void *a, const void *b)
{
const znode_hold_t *zh_a = (const znode_hold_t *)a;
const znode_hold_t *zh_b = (const znode_hold_t *)b;
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
zh->zh_obj = obj;
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
zh->zh_refcount++;
ASSERT3S(zh->zh_refcount, >, 0);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
mutex_enter(&zh->zh_lock);
return (zh);
}
void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]);
ASSERT3S(zh->zh_refcount, >, 0);
if (--zh->zh_refcount == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
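/*
 * Typical usage, e.g. around znode creation in zfs_mknode() or lookup
 * in zfs_zget():
 *
 *	znode_hold_t *zh = zfs_znode_hold_enter(zfsvfs, obj);
 *	... create, look up, or destroy the znode and its SA buffer ...
 *	zfs_znode_hold_exit(zfsvfs, zh);
 */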
dev_t
zfs_cmpldev(uint64_t dev)
{
return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) ||
RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
* Called by new_inode() to allocate a new inode.
*/
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
znode_t *zp;
zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
*ip = ZTOI(zp);
return (0);
}
/*
* Called in multiple places when an inode should be destroyed.
*/
void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zfsvfs->z_all_znodes, zp);
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
switch (ip->i_mode & S_IFMT) {
case S_IFREG:
ip->i_op = &zpl_inode_operations;
#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
ip->i_fop = &zpl_file_operations.kabi_fops;
#else
ip->i_fop = &zpl_file_operations;
#endif
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
case S_IFDIR:
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
ip->i_flags |= S_IOPS_WRAPPER;
ip->i_op = &zpl_dir_inode_operations.ops;
#else
ip->i_op = &zpl_dir_inode_operations;
#endif
ip->i_fop = &zpl_dir_file_operations;
ITOZ(ip)->z_zn_prefetch = B_TRUE;
break;
case S_IFLNK:
ip->i_op = &zpl_symlink_inode_operations;
break;
/*
* rdev is only stored in an SA for device files.
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
zfs_fallthrough;
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ip, ip->i_mode, rdev);
ip->i_op = &zpl_special_inode_operations;
break;
default:
zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
(u_longlong_t)ip->i_ino, ip->i_mode);
/* Assume the inode is a file and attempt to continue */
ip->i_mode = S_IFREG | 0644;
ip->i_op = &zpl_inode_operations;
#ifdef HAVE_VFS_FILE_OPERATIONS_EXTEND
ip->i_fop = &zpl_file_operations.kabi_fops;
#else
ip->i_fop = &zpl_file_operations;
#endif
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
}
}
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
* Linux and Solaris have different sets of file attributes, so we
* restrict this conversion to the intersection of the two.
*/
#ifdef HAVE_INODE_SET_FLAGS
unsigned int flags = 0;
if (zp->z_pflags & ZFS_IMMUTABLE)
flags |= S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
flags |= S_APPEND;
inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
if (zp->z_pflags & ZFS_IMMUTABLE)
ip->i_flags |= S_IMMUTABLE;
else
ip->i_flags &= ~S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
ip->i_flags |= S_APPEND;
else
ip->i_flags &= ~S_APPEND;
#endif
}
/*
* Update the embedded inode given the znode.
*/
void
zfs_znode_update_vfs(znode_t *zp)
{
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
if (zfsctl_is_node(ip))
return;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
}
/*
* Construct a znode+inode and initialize.
*
* This does not call dmu_set_user(); that is
* up to the caller to do, in case you don't want to
* return the znode.
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
uint64_t tmp_gen;
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
- inode_timespec_t tmp_ctime;
+ inode_timespec_t tmp_ts;
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[12];
int count = 0;
ASSERT(zfsvfs != NULL);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
zp->z_is_mapped = B_FALSE;
#endif
zp->z_is_ctldir = B_FALSE;
zp->z_suspended = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
set_nlink(ip, (uint32_t)links);
zfs_uid_write(ip, z_uid);
zfs_gid_write(ip, z_gid);
zfs_set_inode_flags(zp, ip);
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
- ZFS_TIME_DECODE(&ip->i_atime, atime);
- ZFS_TIME_DECODE(&ip->i_mtime, mtime);
- ZFS_TIME_DECODE(&tmp_ctime, ctime);
- zpl_inode_set_ctime_to_ts(ip, tmp_ctime);
+ ZFS_TIME_DECODE(&tmp_ts, atime);
+ zpl_inode_set_atime_to_ts(ip, tmp_ts);
+ ZFS_TIME_DECODE(&tmp_ts, mtime);
+ zpl_inode_set_mtime_to_ts(ip, tmp_ts);
+ ZFS_TIME_DECODE(&tmp_ts, ctime);
+ zpl_inode_set_ctime_to_ts(ip, tmp_ts);
ZFS_TIME_DECODE(&zp->z_btime, btime);
ip->i_ino = zp->z_id;
zfs_znode_update_vfs(zp);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This should never
* happen because the inode numbers map 1:1 with the object numbers.
*
* Exceptions include rolling back a mounted file system, either
* from the zfs rollback or zfs recv command.
*
* Active inodes are unhashed during the rollback, but since zrele
* can happen asynchronously, we can't guarantee they've been
* unhashed. This can cause hash collisions in unlinked drain
* processing so do not hash unlinked znodes.
*/
if (links > 0)
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
mutex_exit(&zfsvfs->z_znodes_lock);
if (links > 0)
unlock_new_inode(ip);
return (zp);
error:
iput(ip);
return (NULL);
}
/*
* Safely mark an inode dirty. Inodes which are part of a read-only
* file system or snapshot may not be dirtied.
*/
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_TMPFILE - new object is of O_TMPFILE
* IS_XATTR - new object is an attribute
* acl_ids - ACL related attributes
*
* OUT: zpp - allocated znode (set to dzp if IS_ROOT_NODE)
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
inode_timespec_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = 2;
} else {
size = 0;
links = (flag & IS_TMPFILE) ? 0 : 1;
}
if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
/*
* With the ZFS_PROJID flag, we can easily tell whether a project
* ID is stored on disk. See zfs_space_delta_cb().
*/
if (obj_type != DMU_OT_ZNODE &&
dmu_objset_projectquota_enabled(zfsvfs->z_os))
pflags |= ZFS_PROJID;
/*
* Inherit project ID from parent if required.
*/
projid = zfs_inherit_projid(dzp);
if (dzp->z_pflags & ZFS_PROJINHERIT)
pflags |= ZFS_PROJINHERIT;
}
/*
* Whether execs are denied (ZFS_NO_EXECS_DENIED) will be determined
* when zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be
* constructed in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
pflags & ZFS_PROJID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
/*
* The call to zfs_znode_alloc() may fail if memory is low
* via the call path: alloc_inode() -> inode_init_always() ->
* security_inode_alloc() -> inode_alloc_security(). Since
* the existing code is written such that zfs_mknode() cannot
* fail, retry until sufficient memory has been reclaimed.
*/
do {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
} while (*zpp == NULL);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
boolean_t update_inode = B_FALSE;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
znode_hold_t *zh;
int err;
sa_handle_t *hdl;
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find a sa handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
/*
* If zp->z_unlinked is set, the znode is already marked
* for deletion and should not be discovered. Check this
* after checking igrab() due to fsetxattr() & O_TMPFILE.
*
* If igrab() returns NULL the VFS has independently
* determined the inode should be evicted and has
* called iput_final() to start the eviction process.
* The SA handle is still valid but because the VFS
* requires that the eviction succeed we must drop
* our locks and references to allow the eviction to
* complete. The zfs_zget() may then be retried.
*
* This unlikely case could be optimized by registering
* a sops->drop_inode() callback. The callback would
* need to detect the active SA hold thereby informing
* the VFS that this inode should not be evicted.
*/
if (igrab(ZTOI(zp)) == NULL) {
if (zp->z_unlinked)
err = SET_ERROR(ENOENT);
else
err = SET_ERROR(EAGAIN);
} else {
*zpp = zp;
err = 0;
}
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
if (err == EAGAIN) {
/* inode might need this to finish evict */
cond_resched();
goto again;
}
return (err);
}
/*
* Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc().
*
* If zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
uint64_t mode;
uint64_t links;
sa_bulk_attr_t bulk[11];
int err;
int count = 0;
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
- inode_timespec_t tmp_ctime;
+ inode_timespec_t tmp_ts;
uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
* Skip ctldir znodes; otherwise they will always get invalidated. This
* causes odd behaviour for the mounted snapdirs. In particular, for
* Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
* anyone from automounting it again as long as someone is still using
* the detached mount.
*/
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
&projid, 8);
if (err != 0 && err != ENOENT) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(err));
}
}
zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
- ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
- ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
- ZFS_TIME_DECODE(&tmp_ctime, ctime);
- zpl_inode_set_ctime_to_ts(ZTOI(zp), tmp_ctime);
+ ZFS_TIME_DECODE(&tmp_ts, atime);
+ zpl_inode_set_atime_to_ts(ZTOI(zp), tmp_ts);
+ ZFS_TIME_DECODE(&tmp_ts, mtime);
+ zpl_inode_set_mtime_to_ts(ZTOI(zp), tmp_ts);
+ ZFS_TIME_DECODE(&tmp_ts, ctime);
+ zpl_inode_set_ctime_to_ts(ZTOI(zp), tmp_ts);
ZFS_TIME_DECODE(&zp->z_btime, btime);
if ((uint32_t)gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
set_nlink(ZTOI(zp), (uint32_t)links);
zfs_set_inode_flags(zp, ZTOI(zp));
zp->z_blksz = doi.doi_data_block_size;
zp->z_atime_dirty = B_FALSE;
zfs_znode_update_vfs(zp);
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
if (zp->z_unlinked)
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
ASSERT(zp->z_sa_hdl);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zfs_compare_timespec timespec64_compare
#else
#define zfs_compare_timespec timespec_compare
#endif
/*
* Determine whether the znode's atime must be updated. The logic mostly
* duplicates the Linux kernel's relatime_need_update() functionality.
* This function is only called if the underlying filesystem actually has
* atime updates enabled.
*/
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
- inode_timespec_t now, tmp_ctime;
+ inode_timespec_t now, tmp_atime, tmp_ts;
gethrestime(&now);
+ tmp_atime = zpl_inode_get_atime(ip);
/*
* In relatime mode, only update the atime if the previous atime
* is earlier than either the ctime or mtime, or if at least a day
* has passed since the last update of atime.
*/
- if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
+ tmp_ts = zpl_inode_get_mtime(ip);
+ if (zfs_compare_timespec(&tmp_ts, &tmp_atime) >= 0)
return (B_TRUE);
- tmp_ctime = zpl_inode_get_ctime(ip);
- if (zfs_compare_timespec(&tmp_ctime, &ip->i_atime) >= 0)
+ tmp_ts = zpl_inode_get_ctime(ip);
+ if (zfs_compare_timespec(&tmp_ts, &tmp_atime) >= 0)
return (B_TRUE);
- if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
+ if ((hrtime_t)now.tv_sec - (hrtime_t)tmp_atime.tv_sec >= 24*60*60)
return (B_TRUE);
return (B_FALSE);
}
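The decision rule above can be exercised outside the kernel. Below is a minimal userland sketch (illustrative only, not a ZFS interface) of the same relatime policy, with plain struct timespec values standing in for the inode timestamps:

#include <stdbool.h>
#include <time.h>

/*
 * Userland sketch of the relatime rule: update atime when it is not
 * newer than mtime or ctime, or when it is at least a day old. All
 * names here are illustrative stand-ins, not ZFS interfaces.
 */
static bool
example_relatime_need_update(const struct timespec *a,
    const struct timespec *m, const struct timespec *c,
    const struct timespec *now)
{
	/* atime not newer than mtime: update */
	if (m->tv_sec > a->tv_sec ||
	    (m->tv_sec == a->tv_sec && m->tv_nsec >= a->tv_nsec))
		return (true);
	/* atime not newer than ctime: update */
	if (c->tv_sec > a->tv_sec ||
	    (c->tv_sec == a->tv_sec && c->tv_nsec >= a->tv_nsec))
		return (true);
	/* atime at least a day old: update */
	return (now->tv_sec - a->tv_sec >= 24 * 60 * 60);
}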
/*
* Prepare to update znode time stamps.
*
* IN: zp - znode requiring timestamp update
* flag - ATTR_MTIME, ATTR_CTIME flags
*
* OUT: zp - z_seq
* mtime - new mtime
* ctime - new ctime
*
* Note: We don't update atime here, because we rely on the Linux VFS
* to do atime updating.
*/
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
- inode_timespec_t now, tmp_ctime;
+ inode_timespec_t now, tmp_ts;
gethrestime(&now);
zp->z_seq++;
if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
- ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
+ ZFS_TIME_DECODE(&tmp_ts, mtime);
+ zpl_inode_set_mtime_to_ts(ZTOI(zp), tmp_ts);
if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
- ZFS_TIME_DECODE(&tmp_ctime, ctime);
- zpl_inode_set_ctime_to_ts(ZTOI(zp), tmp_ctime);
+ ZFS_TIME_DECODE(&tmp_ts, ctime);
+ zpl_inode_set_ctime_to_ts(ZTOI(zp), tmp_ts);
if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
/*
* Grow the block size for a file.
*
* IN: zp - znode of file to free data in.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
* Increase the file length
*
* IN: zp - znode of file to free data in.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
/*
* zfs_zero_partial_page - Modeled after update_pages() but
* with different arguments and semantics for use by zfs_freesp().
*
* Zeroes a piece of a single page cache entry for zp at offset
* start and length len.
*
* Caller must acquire a range lock on the file for the region
* being zeroed so that the ARC and page cache stay in sync.
*/
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
struct page *pp;
int64_t off;
void *pb;
ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
off = start & (PAGE_SIZE - 1);
start &= PAGE_MASK;
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
memset(pb + off, 0, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
* range lock in order to keep the ARC and page cache in sync.
*/
if (zn_has_cached_data(zp, off, off + len - 1)) {
loff_t first_page, last_page, page_len;
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
last_page_offset = last_page << PAGE_SHIFT;
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
truncate_inode_pages_range(ZTOI(zp)->i_mapping,
first_page_offset, last_page_offset - 1);
}
/* truncate sub-page ranges */
if (first_page > last_page) {
/* entire punched area within a single page */
zfs_zero_partial_page(zp, off, len);
} else {
/* beginning of punched area at the end of a page */
page_len = first_page_offset - off;
if (page_len > 0)
zfs_zero_partial_page(zp, off, page_len);
/* end of punched area at the beginning of a page */
page_len = off + len - last_page_offset;
if (page_len > 0)
zfs_zero_partial_page(zp, last_page_offset,
page_len);
}
}
zfs_rangelock_exit(lr);
return (error);
}
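To make the page arithmetic above concrete, here is a small standalone program (illustrative only, assuming 4 KiB pages) that computes the same boundaries for a sample hole at off = 1000, len = 10000: the whole pages [4096, 8191] are truncated, and the sub-page head (3096 bytes) and tail (2808 bytes) are zeroed.

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12			/* assume 4 KiB pages */
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)

int
main(void)
{
	uint64_t off = 1000, len = 10000;
	/* same arithmetic as zfs_free_range() above */
	uint64_t first_page = (off + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
	uint64_t last_page = (off + len) >> EX_PAGE_SHIFT;
	uint64_t first_page_offset = first_page << EX_PAGE_SHIFT;
	uint64_t last_page_offset = last_page << EX_PAGE_SHIFT;

	printf("truncate whole pages [%llu, %llu]\n",
	    (unsigned long long)first_page_offset,
	    (unsigned long long)(last_page_offset - 1));
	printf("zero head %llu bytes, tail %llu bytes\n",
	    (unsigned long long)(first_page_offset - off),
	    (unsigned long long)(off + len - last_page_offset));
	return (0);
}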
/*
* Truncate a file
*
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - end of range (0 => EOF)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
goto out;
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
error = 0;
out:
/*
* Truncate the page cache - for file truncate operations, use
* the purpose-built API for truncations. For punching operations,
* the truncation is handled under a range lock in zfs_free_range.
*/
if (len == 0)
truncate_setsize(ZTOI(zp), off);
return (error);
}
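Given the semantics documented above (len == 0 frees to EOF, i.e. truncates the file at off), a hypothetical caller might look like the sketch below; the helper name, offsets, and flags are illustrative only, not ZFS code.

/*
 * Hypothetical helper (illustrative only): truncate a file to 4 KiB,
 * then punch an 8 KiB hole at offset 64 KiB, logging both operations.
 */
static int
example_truncate_and_punch(znode_t *zp)
{
	int error;

	/* len == 0: free to EOF, i.e. truncate at 'off' */
	error = zfs_freesp(zp, 4096, 0, O_RDWR, B_TRUE);
	if (error != 0)
		return (error);

	/* nonzero len: punch a hole of 8 KiB at offset 64 KiB */
	return (zfs_freesp(zp, 65536, 8192, O_RDWR, B_TRUE));
}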
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int size;
int error;
int i;
znode_t *rootzp = NULL;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
const char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
ASSERT(error == 0);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT(error == 0);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
mutex_destroy(&zfsvfs->z_znodes_lock);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, const void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc.
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
for (;;) {
uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir = 0;
if (prevdb) {
ASSERT(prevhdl != NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
strcpy(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
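zfs_obj_to_path_impl() assembles the path right to left: each component is prepended at the tail of the caller's buffer while walking parent pointers, and the finished string is finally slid to the front with memmove(). The standalone program below (illustrative only, not ZFS code) demonstrates the same technique on fixed components:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *components[] = { "file", "dir2", "dir1" };	/* leaf first */
	char buf[64];
	char *path = buf + sizeof (buf) - 1;

	*path = '\0';
	for (int i = 0; i < 3; i++) {
		size_t complen = strlen(components[i]) + 1;	/* +1 for '/' */

		path -= complen;
		assert(path >= buf);
		*path = '/';
		memcpy(path + 1, components[i], complen - 1);
	}
	(void) memmove(buf, path, buf + sizeof (buf) - path);
	printf("%s\n", buf);	/* prints /dir1/dir2/file */
	return (0);
}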
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION)
pname = ZPL_VERSION_STR;
else
pname = zfs_prop_to_name(prop);
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_OFF;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
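A hypothetical caller of zfs_get_zplprop() would look like the sketch below (illustrative only): the value comes from the objset cache when available, otherwise from the master node, otherwise from the documented defaults.

/*
 * Hypothetical caller (illustrative only, not ZFS code): read the ZPL
 * version for an objset and log it.
 */
static int
example_print_zpl_version(objset_t *os)
{
	uint64_t version;
	int error;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &version);
	if (error == 0)
		dprintf("ZPL version %llu\n", (u_longlong_t)version);
	return (error);
}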
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
"(debug - leaks space into the unlinked set)");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
index 775ab8efbcdf..2114be281901 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
@@ -1,2079 +1,2084 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
#include <sys/qat.h>
/*
* This file is responsible for handling all of the details of generating
* encryption parameters and performing encryption and authentication.
*
* BLOCK ENCRYPTION PARAMETERS:
* Encryption/Authentication Algorithm Suite (crypt):
* The encryption algorithm, mode, and key length we are going to use. We
* currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
* keys. All authentication is currently done with SHA512-HMAC.
*
* Plaintext:
* The unencrypted data that we want to encrypt.
*
* Initialization Vector (IV):
* An initialization vector for the encryption algorithms. This is used to
* "tweak" the encryption algorithms so that two blocks of the same data are
* encrypted into different ciphertext outputs, thus obfuscating block patterns.
* The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
* never reused with the same encryption key. This value is stored unencrypted
* and must simply be provided to the decryption function. We use a 96 bit IV
* (as recommended by NIST) for all block encryption. For non-dedup blocks we
* derive the IV randomly. The first 64 bits of the IV are stored in the second
* word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
* blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
* of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
* of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
* level 0 blocks is the number of allocated dnodes in that block. The on-disk
* format supports at most 2^15 slots per L0 dnode block, because the maximum
* block size is 16MB (2^24). In either case, for level 0 blocks this number
* will still be smaller than UINT32_MAX so it is safe to store the IV in the
* top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
* for the dnode code.
*
* Master key:
* This is the most important secret data of an encrypted dataset. It is used
* along with the salt to generate the actual encryption keys via HKDF. We
* do not use the master key to directly encrypt any data because there are
* theoretical limits on how much data can actually be safely encrypted with
* any encryption mode. The master key is stored encrypted on disk with the
* user's wrapping key. Its length is determined by the encryption algorithm.
* For details on how this is stored see the block comment in dsl_crypt.c
*
* Salt:
* Used as an input to the HKDF function, along with the master key. We use a
* 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
* can be used for encrypting many blocks, so we cache the current salt and the
* associated derived key in zio_crypt_t so we do not need to derive it again
* needlessly.
*
* Encryption Key:
* A secret binary key, generated from an HKDF function used to encrypt and
* decrypt data.
*
* Message Authentication Code (MAC):
* The MAC is an output of authenticated encryption modes such as AES-GCM and
* AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
* data on disk and return garbage to the application. Effectively, it is a
* checksum that cannot be reproduced by an attacker. We store the MAC in the
* second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
* regular checksum of the ciphertext which can be used for scrubbing.
*
* OBJECT AUTHENTICATION:
* Some object types, such as DMU_OT_MASTER_NODE, cannot be encrypted because
* they contain some info that always needs to be readable. To prevent this
* data from being altered, we authenticate this data using SHA512-HMAC. This
* will produce a MAC (similar to the one produced via encryption) which can
* be used to verify the object was not modified. HMACs do not require key
* rotation or IVs, so we can keep up to the full 3 copies of authenticated
* data.
*
* ZIL ENCRYPTION:
* ZIL blocks have their bp written to disk ahead of the associated data, so we
* cannot store the MAC there as we normally do. For these blocks the MAC is
* stored in the embedded checksum within the zil_chain_t header. The salt and
* IV are generated for the block on bp allocation instead of at encryption
* time. In addition, ZIL blocks have some pieces that must be left in plaintext
* for claiming even though all of the sensitive user data still needs to be
* encrypted. The function zio_crypt_init_uios_zil() handles parsing which
* pieces of the block need to be encrypted. All data that is not encrypted is
* authenticated using the AAD mechanisms that the supported encryption modes
* provide for. In order to preserve the semantics of the ZIL for encrypted
* datasets, the ZIL is not protected at the objset level as described below.
*
* DNODE ENCRYPTION:
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
* which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
* Up to this point, everything we have encrypted and authenticated has been
* at level 0 (or -2 for the ZIL). If we did not do any further work the
* on-disk format would be susceptible to attacks that deleted or rearranged
* the order of level 0 blocks. Ideally, the cleanest solution would be to
* maintain a tree of authentication MACs going up the bp tree. However, this
* presents a problem for raw sends. Send files do not send information about
* indirect blocks so there would be no convenient way to transfer the MACs and
* they cannot be recalculated on the receive side without the master key which
* would defeat one of the purposes of raw sends in the first place. Instead,
* for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
* from the level below. We also include some portable fields from blk_prop such
* as the lsize and compression algorithm to prevent the data from being
* misinterpreted.
*
* At the objset level, we maintain 2 separate 256 bit MACs in the
* objset_phys_t. The first one is "portable" and is the logical root of the
* MAC tree maintained in the metadnode's bps. The second is "local" and is
* used as the root MAC for the user accounting objects, which are also not
* transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
* of the send file. The useraccounting code ensures that the useraccounting
* info is not present upon a receive, so the local MAC can simply be cleared
* out at that time. For more info about objset_phys_t authentication, see
* zio_crypt_do_objset_hmacs().
*
* CONSIDERATIONS FOR DEDUP:
* In order for dedup to work, blocks that we want to dedup with one another
* need to use the same IV and encryption key, so that they will have the same
* ciphertext. Normally, one should never reuse an IV with the same encryption
* key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
* blocks. In this case, however, since we are using the same plaintext as
* well, all that we end up with is a duplicate of the original ciphertext we
* already had. As a result, an attacker with read access to the raw disk will
* be able to tell which blocks are the same but this information is given away
* by dedup anyway. In order to get the same IVs and encryption keys for
* equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
* here so that a reproducible checksum of the plaintext is never available to
* the attacker. The HMAC key is kept alongside the master key, encrypted on
* disk. The first 64 bits of the HMAC are used in place of the random salt, and
* the next 96 bits are used as the IV. As a result of this mechanism, dedup
* will only work within a clone family since encrypted dedup requires use of
* the same master and HMAC keys.
*/
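The dedup salt/IV layout described above can be summarized in a short sketch: the first 64 bits of the plaintext HMAC become the salt and the next 96 bits become the IV. The helper below is illustrative only (the names and standalone form are assumptions, not the actual OpenZFS routine):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define EX_SALT_LEN	8	/* 64-bit salt */
#define EX_IV_LEN	12	/* 96-bit IV */

/*
 * Illustrative sketch: carve the dedup salt and IV out of an HMAC
 * digest of the plaintext, per the layout described above. A
 * SHA512-HMAC digest (64 bytes) easily covers both fields.
 */
static void
example_dedup_params(const uint8_t *digest, size_t digest_len,
    uint8_t *salt, uint8_t *iv)
{
	assert(digest_len >= EX_SALT_LEN + EX_IV_LEN);
	memcpy(salt, digest, EX_SALT_LEN);
	memcpy(iv, digest + EX_SALT_LEN, EX_IV_LEN);
}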
/*
* After encrypting many blocks with the same key we may start to run up
* against the theoretical limits of how much data can securely be encrypted
* with a single key using the supported encryption modes. The most obvious
* limitation is that our risk of generating 2 equivalent 96 bit IVs increases
* the more IVs we generate (which both GCM and CCM modes strictly forbid).
* This risk actually grows surprisingly quickly over time according to the
* Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
* generated n IVs with a cryptographically secure RNG, the approximate
* probability p(n) of a collision is given as:
*
* p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
*
* [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
*
* Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion,
* we must not write more than 398,065,730 blocks with the same encryption key.
* Therefore, we rotate our keys after 400,000,000 blocks have been written by
* generating a new random 64 bit salt for our HKDF encryption key generation
* function.
*/
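As a sanity check on the numbers above, the collision bound can be evaluated directly in a few lines of standalone C (userland, illustrative only); for n = 398,065,730 it comes out to roughly one in a trillion:

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double n = 398065730.0;
	double space = ldexp(1.0, 96);		/* 2^96 possible IVs */
	/* p(n) ~= 1 - e^(-n*(n-1)/(2*2^96)); expm1() keeps precision */
	double p = -expm1(-n * (n - 1.0) / (2.0 * space));

	printf("p(%.0f) ~= %.3e\n", n, p);	/* ~1.0e-12 */
	return (0);
}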
#define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000
#define ZFS_CURRENT_MAX_SALT_USES \
(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{"", ZC_TYPE_NONE, 0, "inherit"},
{"", ZC_TYPE_NONE, 0, "on"},
{"", ZC_TYPE_NONE, 0, "off"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
crypto_destroy_ctx_template(key->zk_current_tmpl);
crypto_destroy_ctx_template(key->zk_hmac_tmpl);
/* zero out sensitive data */
memset(key, 0, sizeof (zio_crypt_key_t));
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
int ret;
crypto_mechanism_t mech = {0};
uint_t keydata_len;
ASSERT(key != NULL);
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
/*
* Workaround for deficiencies in GCC 12+ with UBSan enabled.
*
* GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code
* below as violating -Warray-bounds.
*/
#if defined(__GNUC__) && !defined(__clang__) && \
((!defined(_KERNEL) && defined(ZFS_UBSAN_ENABLED)) || \
defined(CONFIG_UBSAN))
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
keydata_len = zio_crypt_table[crypt].ci_keylen;
#if defined(__GNUC__) && !defined(__clang__) && \
((!defined(_KERNEL) && defined(ZFS_UBSAN_ENABLED)) || \
defined(CONFIG_UBSAN))
#pragma GCC diagnostic pop
#endif
memset(key, 0, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_master_keydata, keydata_len);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for the ICP */
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_data = &key->zk_hmac_key;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
* this is just an optimization.
*/
mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
&key->zk_hmac_tmpl);
if (ret != CRYPTO_SUCCESS)
key->zk_hmac_tmpl = NULL;
key->zk_crypt = crypt;
key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy(key);
return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
int ret = 0;
uint8_t salt[ZIO_DATA_SALT_LEN];
crypto_mechanism_t mech;
uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
/* generate a new salt */
ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
rw_enter(&key->zk_salt_lock, RW_WRITER);
/* someone beat us to the salt rotation, just unlock and return */
if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
goto out_unlock;
/* derive the current key from the master key and the new salt */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
if (ret != 0)
goto out_unlock;
/* assign the salt and reset the usage count */
memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
/* destroy the old context template and create the new one */
mech.cm_type = crypto_mech2id(zio_crypt_table[key->zk_crypt].ci_mechname);
crypto_destroy_ctx_template(key->zk_current_tmpl);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
rw_exit(&key->zk_salt_lock);
return (0);
out_unlock:
rw_exit(&key->zk_salt_lock);
error:
return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
int ret;
boolean_t salt_change;
rw_enter(&key->zk_salt_lock, RW_READER);
memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
rw_exit(&key->zk_salt_lock);
if (salt_change) {
ret = zio_crypt_key_change_salt(key);
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* This function handles all encryption and decryption in zfs. When
* encrypting it expects puio to reference the plaintext and cuio to
* reference the ciphertext. cuio must have enough space for the
* ciphertext + room for a MAC. datalen should be the length of the
* plaintext / ciphertext alone.
*/
static int
zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key,
crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen,
zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len)
{
int ret;
crypto_data_t plaindata, cipherdata;
CK_AES_CCM_PARAMS ccmp;
CK_AES_GCM_PARAMS gcmp;
crypto_mechanism_t mech;
zio_crypt_info_t crypt_info;
uint_t plain_full_len, maclen;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
/* lookup the encryption info */
crypt_info = zio_crypt_table[crypt];
/* the mac will always be the last iovec_t in the cipher uio */
maclen = cuio->uio_iov[cuio->uio_iovcnt - 1].iov_len;
ASSERT(maclen <= ZIO_DATA_MAC_LEN);
/* setup encryption mechanism (same as crypt) */
mech.cm_type = crypto_mech2id(crypt_info.ci_mechname);
/*
* Strangely, the ICP requires that plain_full_len include the
* MAC length when decrypting, even though the UIO does not need
* to have the extra space allocated.
*/
if (encrypt) {
plain_full_len = datalen;
} else {
plain_full_len = datalen + maclen;
}
/*
* setup encryption params (currently only AES CCM and AES GCM
* are supported)
*/
if (crypt_info.ci_crypt_type == ZC_TYPE_CCM) {
ccmp.ulNonceSize = ZIO_DATA_IV_LEN;
ccmp.ulAuthDataSize = auth_len;
ccmp.authData = authbuf;
ccmp.ulMACSize = maclen;
ccmp.nonce = ivbuf;
ccmp.ulDataSize = plain_full_len;
mech.cm_param = (char *)(&ccmp);
mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS);
} else {
gcmp.ulIvLen = ZIO_DATA_IV_LEN;
gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN);
gcmp.ulAADLen = auth_len;
gcmp.pAAD = authbuf;
gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen);
gcmp.pIv = ivbuf;
mech.cm_param = (char *)(&gcmp);
mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
}
/* populate the cipher and plain data structs. */
plaindata.cd_format = CRYPTO_DATA_UIO;
plaindata.cd_offset = 0;
plaindata.cd_uio = puio;
plaindata.cd_length = plain_full_len;
cipherdata.cd_format = CRYPTO_DATA_UIO;
cipherdata.cd_offset = 0;
cipherdata.cd_uio = cuio;
cipherdata.cd_length = datalen + maclen;
/* perform the actual encryption */
if (encrypt) {
ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
} else {
ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata);
if (ret != CRYPTO_SUCCESS) {
ASSERT3U(ret, ==, CRYPTO_INVALID_MAC);
ret = SET_ERROR(ECKSUM);
goto error;
}
}
return (0);
error:
return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
int ret;
zfs_uio_t puio, cuio;
uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3];
uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */
ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
if (ret != 0)
goto error;
/* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata;
plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[0].iov_base = keydata_out;
cipher_iovecs[0].iov_len = keydata_len;
cipher_iovecs[1].iov_base = hmac_keydata_out;
cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[2].iov_base = mac;
cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;
/*
* Although we don't support writing to the old format, we do
* support rewrapping the key so that the user can move and
* quarantine datasets on the old format.
*/
if (key->zk_version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(key->zk_guid);
} else {
ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(key->zk_guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(key->zk_version);
}
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
puio.uio_iov = plain_iovecs;
puio.uio_iovcnt = 2;
puio.uio_segflg = UIO_SYSSPACE;
cuio.uio_iov = cipher_iovecs;
cuio.uio_iovcnt = 3;
cuio.uio_segflg = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio(B_TRUE, crypt, cwkey, NULL, iv, enc_len,
&puio, &cuio, (uint8_t *)aad, aad_len);
if (ret != 0)
goto error;
return (0);
error:
return (ret);
}
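/*
 * Illustrative standalone sketch (not part of this file) of the wrapping
 * AAD layout built above: for the current key version the AAD is three
 * little-endian uint64s -- guid, crypt, and version -- packed back to
 * back for 24 bytes total, while version 0 keys use only the guid. The
 * guid and version values below are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>

static void
put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

int
main(void)
{
	uint8_t aad[24];

	put_le64(aad + 0, 0xdeadbeefcafef00dULL);	/* zk_guid (example) */
	put_le64(aad + 8, 8);	/* crypt: aes-256-gcm per the table above */
	put_le64(aad + 16, 1);	/* key version (assumed current) */
	for (size_t i = 0; i < sizeof (aad); i++)
		printf("%02x", (unsigned)aad[i]);
	printf("\n");
	return (0);
}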
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
uint8_t *mac, zio_crypt_key_t *key)
{
crypto_mechanism_t mech;
zfs_uio_t puio, cuio;
uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3];
uint_t enc_len, keydata_len, aad_len;
int ret;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata;
plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[0].iov_base = keydata;
cipher_iovecs[0].iov_len = keydata_len;
cipher_iovecs[1].iov_base = hmac_keydata;
cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[2].iov_base = mac;
cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;
if (version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(guid);
} else {
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(version);
}
enc_len = keydata_len + SHA512_HMAC_KEYLEN;
puio.uio_iov = plain_iovecs;
puio.uio_segflg = UIO_SYSSPACE;
puio.uio_iovcnt = 2;
cuio.uio_iov = cipher_iovecs;
cuio.uio_iovcnt = 3;
cuio.uio_segflg = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio(B_FALSE, crypt, cwkey, NULL, iv, enc_len,
&puio, &cuio, (uint8_t *)aad, aad_len);
if (ret != 0)
goto error;
/* generate a fresh salt */
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for ICP */
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
* this is just an optimization.
*/
mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
&key->zk_hmac_tmpl);
if (ret != CRYPTO_SUCCESS)
key->zk_hmac_tmpl = NULL;
key->zk_crypt = crypt;
key->zk_version = version;
key->zk_guid = guid;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy(key);
return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
int ret;
/* randomly generate the IV */
ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
if (ret != 0)
goto error;
return (0);
error:
memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
uint8_t *digestbuf, uint_t digestlen)
{
int ret;
crypto_mechanism_t mech;
crypto_data_t in_data, digest_data;
uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
/* initialize sha512-hmac mechanism and crypto data */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
mech.cm_param = NULL;
mech.cm_param_len = 0;
/* initialize the crypto data */
in_data.cd_format = CRYPTO_DATA_RAW;
in_data.cd_offset = 0;
in_data.cd_length = datalen;
in_data.cd_raw.iov_base = (char *)data;
in_data.cd_raw.iov_len = in_data.cd_length;
digest_data.cd_format = CRYPTO_DATA_RAW;
digest_data.cd_offset = 0;
digest_data.cd_length = SHA512_DIGEST_LENGTH;
digest_data.cd_raw.iov_base = (char *)raw_digestbuf;
digest_data.cd_raw.iov_len = digest_data.cd_length;
/* generate the hmac */
ret = crypto_mac(&mech, &in_data, &key->zk_hmac_key, key->zk_hmac_tmpl,
&digest_data);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
error:
memset(digestbuf, 0, digestlen);
return (ret);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
ret = zio_crypt_do_hmac(key, data, datalen,
digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
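/*
 * Standalone sketch of the digest split performed above, with a dummy
 * buffer standing in for the real HMAC-SHA512 output (assuming
 * ZIO_DATA_SALT_LEN == 8 and ZIO_DATA_IV_LEN == 12): the first 8 bytes
 * become the salt and the next 12 the IV. Because the digest is a
 * deterministic function of the key and plaintext, identical plaintexts
 * encrypt identically, which is what makes dedup of encrypted blocks work.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t digest[64], salt[8], iv[12];

	for (int i = 0; i < 64; i++)
		digest[i] = (uint8_t)i;	/* stand-in for the HMAC output */
	memcpy(salt, digest, sizeof (salt));
	memcpy(iv, digest + sizeof (salt), sizeof (iv));
	printf("salt[0]=%d iv[0]=%d\n", salt[0], iv[0]);
	return (0);
}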
/*
* The following functions are used to encode and decode encryption parameters
* into blkptr_t and zil_header_t. The ICP wants to use these parameters as
* byte strings, which normally means that these strings would not need to deal
* with byteswapping at all. However, both blkptr_t and zil_header_t may be
* byteswapped by lower layers and so we must "undo" that byteswap here upon
* decoding and encoding in a non-native byteorder. These functions require
* that the byteorder bit is correct before being called.
*/
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_PROTECTED(bp));
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
/*
* The ZIL MAC is embedded in the block it protects, which will
* not have been byteswapped by the time this function has been called.
* As a result, we don't need to worry about byteswapping the MAC.
*/
const zil_chain_t *zilc = data;
memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
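/*
 * Standalone illustration of the "undo the byteswap" rule used by the
 * encode/decode helpers above: a MAC is logically a byte string, so when
 * a lower layer byteswaps the containing 64-bit words, swapping them
 * again on encode/decode recovers the original bytes. __builtin_bswap64
 * stands in for BSWAP_64 here (GCC/Clang assumed).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t mac[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t out[8];
	uint64_t word;

	memcpy(&word, mac, sizeof (word));
	word = __builtin_bswap64(word);	/* swapped by a lower layer */
	word = __builtin_bswap64(word);	/* undone on decode */
	memcpy(out, &word, sizeof (out));
	printf("%s\n", memcmp(mac, out, sizeof (mac)) == 0 ?
	    "bytes preserved" : "bytes corrupted");
	return (0);
}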
/*
* This routine takes a block of dnodes (src_abd) and copies only the bonus
* buffers to the same offsets in the dst buffer. datalen should be the size
* of both the src_abd and the dst buffer (not just the length of the bonus
* buffers).
*/
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
uint_t i, max_dnp = datalen >> DNODE_SHIFT;
uint8_t *src;
dnode_phys_t *dnp, *sdnp, *ddnp;
src = abd_borrow_buf_copy(src_abd, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
abd_return_buf(src_abd, src, datalen);
}
/*
* This function decides which fields from blk_prop are included in
* the various on-disk MACs.
*/
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
/*
* Version 0 did not properly zero out all non-portable fields
* as it should have done. We maintain this code so that we can
* do read-only imports of pools on this version.
*/
if (version == 0) {
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
return;
}
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
/*
* The hole_birth feature might set these fields even if this bp
* is a hole. We zero them out here to guarantee that raw sends
* will function with or without the feature.
*/
if (BP_IS_HOLE(bp)) {
bp->blk_prop = 0ULL;
return;
}
/*
* At L0 we want to verify these fields to ensure that data blocks
* can not be reinterpreted. For instance, we do not want an attacker
* to trick us into returning raw lz4 compressed data to the user
* by modifying the compression bits. At higher levels, we cannot
* enforce this policy since raw sends do not convey any information
* about indirect blocks, so these values might be different on the
* receive side. Fortunately, this does not open any new attack
* vectors, since any alteration made to a higher-level bp must
* still verify correctly against the layer below it.
*/
if (BP_GET_LEVEL(bp) != 0) {
BP_SET_BYTEORDER(bp, 0);
BP_SET_COMPRESS(bp, 0);
/*
* psize cannot be set to zero or it will trigger
* asserts, but the value doesn't really matter as
* long as it is constant.
*/
BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
}
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
blkptr_auth_buf_t *bab, uint_t *bab_len)
{
blkptr_t tmpbp = *bp;
if (should_bswap)
byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
ASSERT0(BP_IS_EMBEDDED(&tmpbp));
zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
/*
* We always MAC blk_prop in LE to ensure portability. This
* must be done after decoding the MAC, since the byteorder
* bit may be zeroed out below.
*/
zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
bab->bab_prop = LE_64(tmpbp.blk_prop);
bab->bab_pad = 0ULL;
/* version 0 did not include the padding */
*bab_len = sizeof (blkptr_auth_buf_t);
if (version == 0)
*bab_len -= sizeof (uint64_t);
}
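/*
 * Standalone check of the length arithmetic above, using a mock of
 * blkptr_auth_buf_t (assuming ZIO_DATA_MAC_LEN is 16, i.e. the two
 * 64-bit checksum words copied by zio_crypt_decode_mac_bp()): the
 * current version authenticates the full 32-byte struct, while
 * version 0 drops the 8-byte pad and authenticates 24 bytes.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t bab_prop;
	uint8_t bab_mac[16];
	uint64_t bab_pad;
} bab_mock_t;

int
main(void)
{
	size_t v1_len = sizeof (bab_mock_t);
	size_t v0_len = v1_len - sizeof (uint64_t);

	assert(v1_len == 32 && v0_len == 24);
	printf("current version MACs %zu bytes, v0 MACs %zu bytes\n",
	    v1_len, v0_len);
	return (0);
}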
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
int ret;
uint_t bab_len;
blkptr_auth_buf_t bab;
crypto_data_t cd;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
cd.cd_length = bab_len;
cd.cd_raw.iov_base = (char *)&bab;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
return (0);
error:
return (ret);
}
static void
zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
dnode_phys_t *adnp, tmp_dncore;
size_t dn_core_size = offsetof(dnode_phys_t, dn_blkptr);
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
crypto_data_t cd;
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
/*
* Authenticate the core dnode (masking out non-portable bits).
* We copy only the first 64 bytes that we actually operate on,
* avoiding the overhead of copying the remaining 448 bytes of
* the 512-byte dnode.
*/
memcpy(&tmp_dncore, dnp, dn_core_size);
adnp = &tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
adnp->dn_used = BSWAP_64(adnp->dn_used);
}
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
cd.cd_length = dn_core_size;
cd.cd_raw.iov_base = (char *)adnp;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
for (i = 0; i < dnp->dn_nblkptr; i++) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, &dnp->dn_blkptr[i]);
if (ret != 0)
goto error;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, DN_SPILL_BLKPTR(dnp));
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* objset_phys_t blocks introduce a number of exceptions to the normal
* authentication process. objset_phys_t's contain 2 separate HMACs for
* protecting the integrity of their data. The portable_mac protects the
* metadnode. This MAC can be sent with a raw send and protects against
* reordering of data within the metadnode. The local_mac protects the user
* accounting objects which are not sent from one system to another.
*
* In addition, objset blocks are the only blocks that can be modified and
* written to disk without the key loaded under certain circumstances. During
* zil_claim() we need to be able to update the zil_header_t to complete
* claiming log blocks and during raw receives we need to write out the
* portable_mac from the send file. Both of these actions are possible
* because these fields are not protected by either MAC, so neither action
* requires modifying the MACs without the key. However, when the modified blocks
* are written out they will be byteswapped into the host machine's native
* endianness which will modify fields protected by the MAC. As a result, MAC
* calculation for objset blocks works slightly differently from other block
* types. Where other block types MAC the data in whatever endianness is
* written to disk, objset blocks always MAC the little-endian version of
* their values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
int ret;
crypto_mechanism_t mech;
crypto_context_t ctx;
crypto_data_t cd;
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* initialize HMAC mechanism */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
mech.cm_param = NULL;
mech.cm_param_len = 0;
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
/* calculate the portable MAC from the portable fields and metadnode */
ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the os_type */
intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in fields from the metadnode */
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_meta_dnode);
if (ret)
goto error;
/* store the final digest in a temporary buffer and copy what we need */
cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_portable_mac;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_final(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* Re-extracting os_flags is necessary here as we check next whether
* OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
* decide if the local_mac should be zeroed out. That flag will always
* be set by dmu_objset_id_quota_upgrade_cb() and
* dmu_objset_userspace_upgrade_cb() if user accounting has been
* completed.
*/
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
boolean_t uacct_incomplete =
!(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE);
/*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
if (uacct_incomplete ||
(datalen >= OBJSET_PHYS_SIZE_V3 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
/* calculate the local MAC from the userused and groupused dnodes */
ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the non-portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in fields from the user accounting dnodes */
if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_userused_dnode);
if (ret)
goto error;
}
if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_groupused_dnode);
if (ret)
goto error;
}
if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
datalen >= OBJSET_PHYS_SIZE_V3) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_projectused_dnode);
if (ret)
goto error;
}
/* store the final digest in a temporary buffer and copy what we need */
cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_local_mac;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_final(ctx, &cd);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
static void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
if (uio->uio_iov)
kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t));
}
/*
* This function parses an uncompressed indirect block and returns a checksum
* of all the portable fields from all of the contained bps. The portable
* fields are the MAC and all of the fields from blk_prop except for the dedup,
* checksum, and psize bits. For an explanation of the purpose of this, see
* the comment block on object set authentication.
*/
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
for (i = 0, bp = buf; i < epb; i++, bp++) {
zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
byteswap, bp);
}
SHA2Final(digestbuf, &ctx);
if (generate) {
memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
/*
* Unfortunately, callers of this function will not always have
* easy access to the on-disk format version. This info is
* normally found in the DSL Crypto Key, but the checksum-of-MACs
* is expected to be verifiable even when the key isn't loaded.
* Here, instead of doing a ZAP lookup for the version for each
* zio, we simply try both existing formats.
*/
ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
if (ret == ECKSUM) {
ASSERT(!generate);
ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
buf, datalen, 0, byteswap, cksum);
}
return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
void *buf;
buf = abd_borrow_buf_copy(abd, datalen);
ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
byteswap, cksum);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting ZIL blocks.
* We do not check for the older ZIL chain because the encryption feature
* was not available before the newer ZIL chain was introduced. The goal
* here is to encrypt everything except the blkptr_t of a lr_write_t and
* the zil_chain_t header. Everything that is not encrypted is authenticated.
*/
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
int ret;
- uint64_t txtype, lr_len;
+ uint64_t txtype, lr_len, nused;
uint_t nr_src, nr_dst, crypt_len;
uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
zil_chain_t *zilc;
lr_t *lr;
uint8_t *aadbuf = zio_buf_alloc(datalen);
/* cipherbuf always needs an extra iovec for the MAC */
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
nr_src = 0;
nr_dst = 1;
} else {
src = cipherbuf;
dst = plainbuf;
nr_src = 1;
nr_dst = 0;
}
memset(dst, 0, datalen);
/* find the start and end record of the log block */
zilc = (zil_chain_t *)src;
slrp = src + sizeof (zil_chain_t);
aadp = aadbuf;
- blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
+ nused = ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
+ ASSERT3U(nused, >=, sizeof (zil_chain_t));
+ ASSERT3U(nused, <=, datalen);
+ blkend = src + nused;
/* calculate the number of encrypted iovecs we will need */
for (; slrp < blkend; slrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
+ ASSERT3U(lr_len, >=, sizeof (lr_t));
+ ASSERT3U(lr_len, <=, blkend - slrp);
nr_iovecs++;
if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
nr_iovecs++;
}
nr_src += nr_iovecs;
nr_dst += nr_iovecs;
/* allocate the iovec arrays */
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
/*
* Copy the plain zil header over and authenticate everything except
* the checksum that will store our MAC. If we are writing the data
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
memcpy(dst, src, sizeof (zil_chain_t));
memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
/* loop over records again, filling in iovecs */
nr_iovecs = 0;
slrp = src + sizeof (zil_chain_t);
dlrp = dst + sizeof (zil_chain_t);
for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
/* copy the common lr_t */
memcpy(dlrp, slrp, sizeof (lr_t));
memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
ASSERT3P(src_iovecs, !=, NULL);
ASSERT3P(dst_iovecs, !=, NULL);
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp, if one exists. If the bp does exist we want
* to authenticate it instead.
*/
if (txtype == TX_WRITE) {
crypt_len = sizeof (lr_write_t) -
sizeof (lr_t) - sizeof (blkptr_t);
src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
memcpy(aadp,
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
nr_iovecs++;
total_len += crypt_len;
if (lr_len != sizeof (lr_write_t)) {
crypt_len = lr_len - sizeof (lr_write_t);
src_iovecs[nr_iovecs].iov_base =
slrp + sizeof (lr_write_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base =
dlrp + sizeof (lr_write_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
}
} else if (txtype == TX_CLONE_RANGE) {
const size_t o = offsetof(lr_clone_range_t, lr_nbps);
crypt_len = o - sizeof (lr_t);
src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
/* copy the bps now since they will not be encrypted */
memcpy(dlrp + o, slrp + o, lr_len - o);
memcpy(aadp, slrp + o, lr_len - o);
aadp += lr_len - o;
aad_len += lr_len - o;
nr_iovecs++;
total_len += crypt_len;
} else {
crypt_len = lr_len - sizeof (lr_t);
src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
}
}
*no_crypt = (nr_iovecs == 0);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
if (encrypt) {
puio->uio_iov = src_iovecs;
puio->uio_iovcnt = nr_src;
cuio->uio_iov = dst_iovecs;
cuio->uio_iovcnt = nr_dst;
} else {
puio->uio_iov = dst_iovecs;
puio->uio_iovcnt = nr_dst;
cuio->uio_iov = src_iovecs;
cuio->uio_iovcnt = nr_src;
}
return (0);
error:
zio_buf_free(aadbuf, datalen);
if (src_iovecs != NULL)
kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
if (dst_iovecs != NULL)
kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));
*enc_len = 0;
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
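/*
 * Minimal standalone sketch of the record walk used above. The struct
 * here is a mock, not the real lr_t: log records are packed back to
 * back and each carries its own length in lrc_reclen, so the cursor
 * simply advances by that amount until it reaches the end of the used
 * region (zc_nused in the real code).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
	uint64_t lrc_txtype;
	uint64_t lrc_reclen;
} lr_mock_t;

int
main(void)
{
	uint64_t blk[8];	/* 64-byte mock log block */
	uint8_t *p = (uint8_t *)blk;
	uint8_t *end = p + sizeof (blk);
	lr_mock_t r1 = { 0, 24 }, r2 = { 0, 40 }, rec;

	memset(blk, 0, sizeof (blk));
	memcpy(p, &r1, sizeof (r1));		/* 24-byte record at 0 */
	memcpy(p + 24, &r2, sizeof (r2));	/* 40-byte record at 24 */
	while (p < end) {
		memcpy(&rec, p, sizeof (rec));
		printf("record at %td, len %llu\n", p - (uint8_t *)blk,
		    (unsigned long long)rec.lrc_reclen);
		p += rec.lrc_reclen;
	}
	return (0);
}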
/*
* Special case handling routine for encrypting / decrypting dnode blocks.
*/
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
uint_t nr_src, nr_dst, crypt_len;
uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
uint8_t *src, *dst, *aadp;
dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
uint8_t *aadbuf = zio_buf_alloc(datalen);
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
nr_src = 0;
nr_dst = 1;
} else {
src = cipherbuf;
dst = plainbuf;
nr_src = 1;
nr_dst = 0;
}
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
aadp = aadbuf;
/*
* Count the number of iovecs we will need to do the encryption by
* counting the number of bonus buffers that need to be encrypted.
*/
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
/*
* This block may still be byteswapped. However, all of the
* values we use are either uint8_t's (for which byteswapping
* is a noop) or compared against != 0, which works regardless
* of whether or not we byteswap.
*/
if (sdnp[i].dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
sdnp[i].dn_bonuslen != 0) {
nr_iovecs++;
}
}
nr_src += nr_iovecs;
nr_dst += nr_iovecs;
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
nr_iovecs = 0;
/*
* Iterate through the dnodes again, this time filling in the uios
* we allocated earlier. We also concatenate any data we want to
* authenticate onto aadbuf.
*/
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
memcpy(&ddnp[i], dnp,
(uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
/*
* Handle authenticated data. We authenticate everything in
* the dnode that can be brought over when we do a raw send.
* This includes all of the core fields as well as the MACs
* stored in the bp checksums and all of the portable bits
* from blk_prop. We include the dnode padding here in case it
* ever gets used in the future. Some dn_flags and dn_used are
* not portable, so we mask those values out of the
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
aadp += crypt_len;
aad_len += crypt_len;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, &dnp->dn_blkptr[j]);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, DN_SPILL_BLKPTR(dnp));
}
/*
* If this bonus buffer needs to be encrypted, we prepare an
* iovec_t. The encryption / decryption functions will fill
* this in for us with the encrypted or decrypted data.
* Otherwise we add the bonus buffer to the authenticated
* data buffer and copy it over to the destination. The
* encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
* we can guarantee alignment with the AES block size
* (128 bits).
*/
crypt_len = DN_MAX_BONUS_LEN(dnp);
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
ASSERT3U(nr_iovecs, <, nr_src);
ASSERT3U(nr_iovecs, <, nr_dst);
ASSERT3P(src_iovecs, !=, NULL);
ASSERT3P(dst_iovecs, !=, NULL);
src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
} else {
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
}
*no_crypt = (nr_iovecs == 0);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
if (encrypt) {
puio->uio_iov = src_iovecs;
puio->uio_iovcnt = nr_src;
cuio->uio_iov = dst_iovecs;
cuio->uio_iovcnt = nr_dst;
} else {
puio->uio_iov = dst_iovecs;
puio->uio_iovcnt = nr_dst;
cuio->uio_iov = src_iovecs;
cuio->uio_iovcnt = nr_src;
}
return (0);
error:
zio_buf_free(aadbuf, datalen);
if (src_iovecs != NULL)
kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
if (dst_iovecs != NULL)
kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));
*enc_len = 0;
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio,
uint_t *enc_len)
{
(void) encrypt;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
/* allocate the iovecs for the plain and cipher data */
plain_iovecs = kmem_alloc(nr_plain * sizeof (iovec_t),
KM_SLEEP);
if (!plain_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
KM_SLEEP);
if (!cipher_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
plain_iovecs[0].iov_base = plainbuf;
plain_iovecs[0].iov_len = datalen;
cipher_iovecs[0].iov_base = cipherbuf;
cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen;
puio->uio_iov = plain_iovecs;
puio->uio_iovcnt = nr_plain;
cuio->uio_iov = cipher_iovecs;
cuio->uio_iovcnt = nr_cipher;
return (0);
error:
if (plain_iovecs != NULL)
kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
if (cipher_iovecs != NULL)
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
/*
* This function builds up the plaintext (puio) and ciphertext (cuio) uios so
* that they can be used for encryption and decryption by zio_do_crypt_uio().
* Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
* requiring special handling to parse out pieces that are to be encrypted. The
* authbuf is used by these special cases to store additional authenticated
* data (AAD) for the encryption modes.
*/
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
iovec_t *mac_iov;
ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
/* route to handler */
switch (ot) {
case DMU_OT_INTENT_LOG:
ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
no_crypt);
break;
case DMU_OT_DNODE:
ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
auth_len, no_crypt);
break;
default:
ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
datalen, puio, cuio, enc_len);
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
break;
}
if (ret != 0)
goto error;
/* populate the uios */
puio->uio_segflg = UIO_SYSSPACE;
cuio->uio_segflg = UIO_SYSSPACE;
mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]);
mac_iov->iov_base = mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN;
return (0);
error:
return (ret);
}
/*
* Primary encryption / decryption entrypoint for zio data.
*/
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
boolean_t *no_crypt)
{
int ret;
boolean_t locked = B_FALSE;
uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len;
zfs_uio_t puio, cuio;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL;
crypto_ctx_template_t tmpl;
uint8_t *authbuf = NULL;
memset(&puio, 0, sizeof (puio));
memset(&cuio, 0, sizeof (cuio));
/*
* If the needed key is the current one, just use it. Otherwise we
* need to generate a temporary one from the given salt + master key.
* If we are encrypting, we must return a copy of the current salt
* so that it can be stored in the blkptr_t.
*/
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = key->zk_current_tmpl;
} else {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
if (ret != 0)
goto error;
tmp_ckey.ck_data = enc_keydata;
tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;
}
/*
* Attempt to use QAT acceleration if we can. We currently don't
* do this for metadnode and ZIL blocks, since they have a much
* more involved buffer layout and the qat_crypt() function only
* works in-place.
*/
if (qat_crypt_use_accel(datalen) &&
ot != DMU_OT_INTENT_LOG && ot != DMU_OT_DNODE) {
uint8_t *srcbuf, *dstbuf;
if (encrypt) {
srcbuf = plainbuf;
dstbuf = cipherbuf;
} else {
srcbuf = cipherbuf;
dstbuf = plainbuf;
}
ret = qat_crypt((encrypt) ? QAT_ENCRYPT : QAT_DECRYPT, srcbuf,
dstbuf, NULL, 0, iv, mac, ckey, key->zk_crypt, datalen);
if (ret == CPA_STATUS_SUCCESS) {
if (locked) {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
}
return (0);
}
/* If the hardware implementation fails fall back to software */
}
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
&authbuf, &auth_len, no_crypt);
if (ret != 0)
goto error;
/* perform the encryption / decryption in software */
ret = zio_do_crypt_uio(encrypt, key->zk_crypt, ckey, tmpl, iv, enc_len,
&puio, &cuio, authbuf, auth_len);
if (ret != 0)
goto error;
if (locked) {
rw_exit(&key->zk_salt_lock);
}
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (0);
error:
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (ret);
}
/*
* Simple wrapper around zio_do_crypt_data() to work with abd's instead of
* linear buffers.
*/
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
int ret;
void *ptmp, *ctmp;
if (encrypt) {
ptmp = abd_borrow_buf_copy(pabd, datalen);
ctmp = abd_borrow_buf(cabd, datalen);
} else {
ptmp = abd_borrow_buf(pabd, datalen);
ctmp = abd_borrow_buf_copy(cabd, datalen);
}
ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
datalen, ptmp, ctmp, no_crypt);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (0);
error:
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (ret);
}
#if defined(_KERNEL)
/* CSTYLED */
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
"can be used for generating encryption keys before it is rotated");
#endif
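/*
 * Usage note (assuming the standard Linux module parameter layout): the
 * salt rotation threshold can be lowered at runtime by writing to
 * /sys/module/zfs/parameters/zfs_key_max_salt_uses. Values above the
 * 400,000,000 default have no effect, since ZFS_CURRENT_MAX_SALT_USES
 * above clamps the tunable to that maximum.
 */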
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c
index 139c51cf46df..3065d54fa9da 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file_range.c
@@ -1,281 +1,287 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2023, Klara Inc.
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <linux/fs.h>
#include <sys/file.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfeature.h>
-int zfs_bclone_enabled = 0;
-
/*
* Clone part of a file via block cloning.
*
* Note that we are not required to update file offsets; the kernel will take
* care of that depending on how it was called.
*/
static ssize_t
-__zpl_clone_file_range(struct file *src_file, loff_t src_off,
+zpl_clone_file_range_impl(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, size_t len)
{
struct inode *src_i = file_inode(src_file);
struct inode *dst_i = file_inode(dst_file);
uint64_t src_off_o = (uint64_t)src_off;
uint64_t dst_off_o = (uint64_t)dst_off;
uint64_t len_o = (uint64_t)len;
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int err;
if (!zfs_bclone_enabled)
return (-EOPNOTSUPP);
if (!spa_feature_is_enabled(
dmu_objset_spa(ITOZSB(dst_i)->z_os), SPA_FEATURE_BLOCK_CLONING))
return (-EOPNOTSUPP);
if (src_i != dst_i)
spl_inode_lock_shared(src_i);
spl_inode_lock(dst_i);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_clone_range(ITOZ(src_i), &src_off_o, ITOZ(dst_i),
&dst_off_o, &len_o, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
spl_inode_unlock(dst_i);
if (src_i != dst_i)
spl_inode_unlock_shared(src_i);
if (err < 0)
return (err);
return ((ssize_t)len_o);
}
#if defined(HAVE_VFS_COPY_FILE_RANGE) || \
defined(HAVE_VFS_FILE_OPERATIONS_EXTEND)
/*
* Entry point for copy_file_range(). Copy len bytes from src_off in src_file
* to dst_off in dst_file. We are permitted to do this however we like, so we
* try to just clone the blocks, and if we can't support it, fall back to the
* kernel's generic byte copy function.
*/
ssize_t
zpl_copy_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, size_t len, unsigned int flags)
{
ssize_t ret;
+ /* Flags is reserved for future extensions and must be zero. */
if (flags != 0)
return (-EINVAL);
- /* Try to do it via zfs_clone_range() */
- ret = __zpl_clone_file_range(src_file, src_off,
+ /* Try to do it via zfs_clone_range() and allow shortening. */
+ ret = zpl_clone_file_range_impl(src_file, src_off,
dst_file, dst_off, len);
#ifdef HAVE_VFS_GENERIC_COPY_FILE_RANGE
/*
* Since Linux 5.3 the filesystem driver is responsible for executing
* an appropriate fallback, and a generic fallback function is provided.
*/
if (ret == -EOPNOTSUPP || ret == -EINVAL || ret == -EXDEV ||
ret == -EAGAIN)
ret = generic_copy_file_range(src_file, src_off, dst_file,
dst_off, len, flags);
#else
/*
* Before Linux 5.3 the filesystem has to return -EOPNOTSUPP to signal
* to the kernel that it should fall back to a content copy.
*/
if (ret == -EINVAL || ret == -EXDEV || ret == -EAGAIN)
ret = -EOPNOTSUPP;
#endif /* HAVE_VFS_GENERIC_COPY_FILE_RANGE */
return (ret);
}
#endif /* HAVE_VFS_COPY_FILE_RANGE || HAVE_VFS_FILE_OPERATIONS_EXTEND */
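/*
 * Userspace sketch (not part of this file; file names are made up) of
 * what reaches zpl_copy_file_range(): copy_file_range(2) attempts a
 * block clone first on ZFS, and the error cases handled above degrade
 * to a plain data copy. The glibc >= 2.27 wrapper is assumed.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int in = open("src.dat", O_RDONLY);
	int out = open("dst.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	off_t off_in = 0, off_out = 0;
	ssize_t n;

	if (in < 0 || out < 0)
		return (1);
	/* offsets advance automatically; loop until EOF or error */
	while ((n = copy_file_range(in, &off_in, out, &off_out,
	    1 << 20, 0)) > 0)
		;
	close(in);
	close(out);
	return (n < 0);
}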
#ifdef HAVE_VFS_REMAP_FILE_RANGE
/*
* Entry point for FICLONE/FICLONERANGE/FIDEDUPERANGE.
*
* FICLONE and FICLONERANGE are basically the same as copy_file_range(), except
* that they must clone - they cannot fall back to copying. FICLONE is exactly
* FICLONERANGE, for the entire file. We don't need to try to tell them apart;
* the kernel will sort that out for us.
*
* FIDEDUPERANGE is for turning a non-clone into a clone, that is, compare the
* range in both files and if they're the same, arrange for them to be backed
* by the same storage.
+ *
+ * REMAP_FILE_CAN_SHORTEN lets us know we can clone less than the given range
+ * if we want. It's designed for filesystems that may need to shorten the
+ * length for alignment, EOF, or any other requirement. ZFS may shorten the
+ * request when there is outstanding dirty data which hasn't been written.
*/
loff_t
zpl_remap_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, loff_t len, unsigned int flags)
{
if (flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN))
return (-EINVAL);
- /*
- * REMAP_FILE_CAN_SHORTEN lets us know we can clone less than the given
- * range if we want. Its designed for filesystems that make data past
- * EOF available, and don't want it to be visible in both files. ZFS
- * doesn't do that, so we just turn the flag off.
- */
- flags &= ~REMAP_FILE_CAN_SHORTEN;
-
+ /* No support for dedup yet */
if (flags & REMAP_FILE_DEDUP)
- /* No support for dedup yet */
return (-EOPNOTSUPP);
/* Zero length means to clone everything to the end of the file */
if (len == 0)
len = i_size_read(file_inode(src_file)) - src_off;
- return (__zpl_clone_file_range(src_file, src_off,
- dst_file, dst_off, len));
+ ssize_t ret = zpl_clone_file_range_impl(src_file, src_off,
+ dst_file, dst_off, len);
+
+ if (!(flags & REMAP_FILE_CAN_SHORTEN) && ret >= 0 && ret != len)
+ ret = -EINVAL;
+
+ return (ret);
}
#endif /* HAVE_VFS_REMAP_FILE_RANGE */
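/*
 * Userspace sketch of the FICLONERANGE path above (file names are made
 * up). struct file_clone_range and FICLONERANGE come from <linux/fs.h>;
 * leaving src_length at 0 means "clone to EOF", mirroring the len == 0
 * handling in the handlers above.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(void)
{
	int in = open("src.dat", O_RDONLY);
	int out = open("dst.dat", O_WRONLY | O_CREAT, 0644);
	struct file_clone_range fcr;

	if (in < 0 || out < 0)
		return (1);
	memset(&fcr, 0, sizeof (fcr));
	fcr.src_fd = in;	/* whole-file clone of src.dat into dst.dat */
	if (ioctl(out, FICLONERANGE, &fcr) != 0)
		perror("FICLONERANGE");
	close(in);
	close(out);
	return (0);
}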
#if defined(HAVE_VFS_CLONE_FILE_RANGE) || \
defined(HAVE_VFS_FILE_OPERATIONS_EXTEND)
/*
* Entry point for FICLONE and FICLONERANGE, before Linux 4.20.
*/
int
zpl_clone_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, uint64_t len)
{
/* Zero length means to clone everything to the end of the file */
if (len == 0)
len = i_size_read(file_inode(src_file)) - src_off;
- return (__zpl_clone_file_range(src_file, src_off,
- dst_file, dst_off, len));
+ /* The entire length must be cloned or this is an error. */
+ ssize_t ret = zpl_clone_file_range_impl(src_file, src_off,
+ dst_file, dst_off, len);
+
+ if (ret >= 0 && ret != len)
+ ret = -EINVAL;
+
+ return (ret);
}
#endif /* HAVE_VFS_CLONE_FILE_RANGE || HAVE_VFS_FILE_OPERATIONS_EXTEND */
#ifdef HAVE_VFS_DEDUPE_FILE_RANGE
/*
* Entry point for FIDEDUPERANGE, before Linux 4.20.
*/
int
zpl_dedupe_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, uint64_t len)
{
/* No support for dedup yet */
return (-EOPNOTSUPP);
}
#endif /* HAVE_VFS_DEDUPE_FILE_RANGE */
/* Entry point for FICLONE, before Linux 4.5. */
long
zpl_ioctl_ficlone(struct file *dst_file, void *arg)
{
unsigned long sfd = (unsigned long)arg;
struct file *src_file = fget(sfd);
if (src_file == NULL)
return (-EBADF);
if (dst_file->f_op != src_file->f_op) {
fput(src_file);
return (-EXDEV);
}
size_t len = i_size_read(file_inode(src_file));
- ssize_t ret =
- __zpl_clone_file_range(src_file, 0, dst_file, 0, len);
+ ssize_t ret = zpl_clone_file_range_impl(src_file, 0, dst_file, 0, len);
fput(src_file);
if (ret < 0) {
if (ret == -EOPNOTSUPP)
return (-ENOTTY);
return (ret);
}
if (ret != len)
return (-EINVAL);
return (0);
}
/* Entry point for FICLONERANGE, before Linux 4.5. */
long
zpl_ioctl_ficlonerange(struct file *dst_file, void __user *arg)
{
zfs_ioc_compat_file_clone_range_t fcr;
if (copy_from_user(&fcr, arg, sizeof (fcr)))
return (-EFAULT);
struct file *src_file = fget(fcr.fcr_src_fd);
if (src_file == NULL)
return (-EBADF);
if (dst_file->f_op != src_file->f_op) {
fput(src_file);
return (-EXDEV);
}
size_t len = fcr.fcr_src_length;
if (len == 0)
len = i_size_read(file_inode(src_file)) - fcr.fcr_src_offset;
- ssize_t ret = __zpl_clone_file_range(src_file, fcr.fcr_src_offset,
+ ssize_t ret = zpl_clone_file_range_impl(src_file, fcr.fcr_src_offset,
dst_file, fcr.fcr_dest_offset, len);
fput(src_file);
if (ret < 0) {
if (ret == -EOPNOTSUPP)
return (-ENOTTY);
return (ret);
}
if (ret != len)
return (-EINVAL);
return (0);
}
/* Entry point for FIDEDUPERANGE, before Linux 4.5. */
long
zpl_ioctl_fideduperange(struct file *filp, void *arg)
{
(void) arg;
/* No support for dedup yet */
return (-ENOTTY);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
index 96f65b9e94e2..ad1753f7a071 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
@@ -1,911 +1,912 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_objset.h>
#include <sys/vfs.h>
#include <sys/zpl.h>
#include <sys/file.h>
static struct dentry *
zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
cred_t *cr = CRED();
struct inode *ip;
znode_t *zp;
int error;
fstrans_cookie_t cookie;
pathname_t *ppn = NULL;
pathname_t pn;
int zfs_flags = 0;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
if (dlen(dentry) >= ZAP_MAXNAMELEN)
return (ERR_PTR(-ENAMETOOLONG));
crhold(cr);
cookie = spl_fstrans_mark();
	/* If we are a case-insensitive fs, we need the real name */
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
zfs_flags = FIGNORECASE;
pn_alloc(&pn);
ppn = &pn;
}
error = -zfs_lookup(ITOZ(dir), dname(dentry), &zp,
zfs_flags, cr, NULL, ppn);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
crfree(cr);
spin_lock(&dentry->d_lock);
dentry->d_time = jiffies;
spin_unlock(&dentry->d_lock);
if (error) {
/*
		 * If we have a case-sensitive fs, we do not want to
* insert negative entries, so return NULL for ENOENT.
* Fall through if the error is not ENOENT. Also free memory.
*/
if (ppn) {
pn_free(ppn);
if (error == -ENOENT)
return (NULL);
}
if (error == -ENOENT)
return (d_splice_alias(NULL, dentry));
else
return (ERR_PTR(error));
}
ip = ZTOI(zp);
/*
	 * If we are case-insensitive, call the correct function
* to install the name.
*/
if (ppn) {
struct dentry *new_dentry;
struct qstr ci_name;
if (strcmp(dname(dentry), pn.pn_buf) == 0) {
new_dentry = d_splice_alias(ip, dentry);
} else {
ci_name.name = pn.pn_buf;
ci_name.len = strlen(pn.pn_buf);
new_dentry = d_add_ci(dentry, ip, &ci_name);
}
pn_free(ppn);
return (new_dentry);
} else {
return (d_splice_alias(ip, dentry));
}
}
void
zpl_vap_init(vattr_t *vap, struct inode *dir, umode_t mode, cred_t *cr,
zidmap_t *mnt_ns)
{
vap->va_mask = ATTR_MODE;
vap->va_mode = mode;
vap->va_uid = zfs_vfsuid_to_uid(mnt_ns,
zfs_i_user_ns(dir), crgetuid(cr));
if (dir->i_mode & S_ISGID) {
vap->va_gid = KGID_TO_SGID(dir->i_gid);
if (S_ISDIR(mode))
vap->va_mode |= S_ISGID;
} else {
vap->va_gid = zfs_vfsgid_to_gid(mnt_ns,
zfs_i_user_ns(dir), crgetgid(cr));
}
}
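/*
 * For example, creating a directory inside a setgid directory owned by
 * group "staff" yields a child with va_gid=staff and S_ISGID set in
 * va_mode, giving the usual BSD group-inheritance behavior.
 */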
static int
#ifdef HAVE_IOPS_CREATE_USERNS
zpl_create(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool flag)
#elif defined(HAVE_IOPS_CREATE_IDMAP)
zpl_create(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool flag)
#else
zpl_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool flag)
#endif
{
cred_t *cr = CRED();
znode_t *zp;
vattr_t *vap;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_IOPS_CREATE_USERNS) || defined(HAVE_IOPS_CREATE_IDMAP))
zidmap_t *user_ns = kcred->user_ns;
#endif
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dir, mode, cr, user_ns);
cookie = spl_fstrans_mark();
error = -zfs_create(ITOZ(dir), dname(dentry), vap, 0,
mode, &zp, cr, 0, NULL, user_ns);
if (error == 0) {
error = zpl_xattr_security_init(ZTOI(zp), dir, &dentry->d_name);
if (error == 0)
error = zpl_init_acl(ZTOI(zp), dir);
if (error) {
(void) zfs_remove(ITOZ(dir), dname(dentry), cr, 0);
remove_inode_hash(ZTOI(zp));
iput(ZTOI(zp));
} else {
d_instantiate(dentry, ZTOI(zp));
}
}
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
#ifdef HAVE_IOPS_MKNOD_USERNS
zpl_mknod(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode,
#elif defined(HAVE_IOPS_MKNOD_IDMAP)
zpl_mknod(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode,
#else
zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
#endif
dev_t rdev)
{
cred_t *cr = CRED();
znode_t *zp;
vattr_t *vap;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_IOPS_MKNOD_USERNS) || defined(HAVE_IOPS_MKNOD_IDMAP))
zidmap_t *user_ns = kcred->user_ns;
#endif
/*
* We currently expect Linux to supply rdev=0 for all sockets
* and fifos, but we want to know if this behavior ever changes.
*/
if (S_ISSOCK(mode) || S_ISFIFO(mode))
ASSERT(rdev == 0);
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dir, mode, cr, user_ns);
vap->va_rdev = rdev;
cookie = spl_fstrans_mark();
error = -zfs_create(ITOZ(dir), dname(dentry), vap, 0,
mode, &zp, cr, 0, NULL, user_ns);
if (error == 0) {
error = zpl_xattr_security_init(ZTOI(zp), dir, &dentry->d_name);
if (error == 0)
error = zpl_init_acl(ZTOI(zp), dir);
if (error) {
(void) zfs_remove(ITOZ(dir), dname(dentry), cr, 0);
remove_inode_hash(ZTOI(zp));
iput(ZTOI(zp));
} else {
d_instantiate(dentry, ZTOI(zp));
}
}
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_TMPFILE
static int
#ifdef HAVE_TMPFILE_IDMAP
zpl_tmpfile(struct mnt_idmap *userns, struct inode *dir,
struct file *file, umode_t mode)
#elif !defined(HAVE_TMPFILE_DENTRY)
zpl_tmpfile(struct user_namespace *userns, struct inode *dir,
struct file *file, umode_t mode)
#else
#ifdef HAVE_TMPFILE_USERNS
zpl_tmpfile(struct user_namespace *userns, struct inode *dir,
struct dentry *dentry, umode_t mode)
#else
zpl_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
#endif
#endif
{
cred_t *cr = CRED();
struct inode *ip;
vattr_t *vap;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_TMPFILE_USERNS) || defined(HAVE_TMPFILE_IDMAP))
zidmap_t *userns = kcred->user_ns;
#endif
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
/*
	 * The VFS does not apply the umask, so it is applied here when
	 * POSIX ACLs are not enabled.
*/
if (!IS_POSIXACL(dir))
mode &= ~current_umask();
zpl_vap_init(vap, dir, mode, cr, userns);
cookie = spl_fstrans_mark();
error = -zfs_tmpfile(dir, vap, 0, mode, &ip, cr, 0, NULL, userns);
if (error == 0) {
/* d_tmpfile will do drop_nlink, so we should set it first */
set_nlink(ip, 1);
#ifndef HAVE_TMPFILE_DENTRY
d_tmpfile(file, ip);
error = zpl_xattr_security_init(ip, dir,
&file->f_path.dentry->d_name);
#else
d_tmpfile(dentry, ip);
error = zpl_xattr_security_init(ip, dir, &dentry->d_name);
#endif
if (error == 0)
error = zpl_init_acl(ip, dir);
#ifndef HAVE_TMPFILE_DENTRY
error = finish_open_simple(file, error);
#endif
/*
		 * No need to handle the error here; the file is already
		 * on the unlinked set.
*/
}
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#endif
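/*
 * Illustrative sketch (not part of this change): the tmpfile path above
 * backs open(2) with O_TMPFILE. The unnamed file sits on the unlinked
 * set until it is closed (and reclaimed) or given a name via linkat(2).
 * The paths below are hypothetical.
 */
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
tmpfile_example(void)
{
	char path[64];
	int fd = open("/tank/fs", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0)
		return (1);

	/* ... write data through fd, then materialize it atomically. */
	(void) snprintf(path, sizeof (path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tank/fs/file",
	    AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");

	close(fd);
	return (0);
}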
static int
zpl_unlink(struct inode *dir, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_remove(ITOZ(dir), dname(dentry), cr, 0);
/*
* For a CI FS we must invalidate the dentry to prevent the
* creation of negative entries.
*/
if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE)
d_invalidate(dentry);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
#ifdef HAVE_IOPS_MKDIR_USERNS
zpl_mkdir(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode)
#elif defined(HAVE_IOPS_MKDIR_IDMAP)
zpl_mkdir(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode)
#else
zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
#endif
{
cred_t *cr = CRED();
vattr_t *vap;
znode_t *zp;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_IOPS_MKDIR_USERNS) || defined(HAVE_IOPS_MKDIR_IDMAP))
zidmap_t *user_ns = kcred->user_ns;
#endif
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dir, mode | S_IFDIR, cr, user_ns);
cookie = spl_fstrans_mark();
error = -zfs_mkdir(ITOZ(dir), dname(dentry), vap, &zp, cr, 0, NULL,
user_ns);
if (error == 0) {
error = zpl_xattr_security_init(ZTOI(zp), dir, &dentry->d_name);
if (error == 0)
error = zpl_init_acl(ZTOI(zp), dir);
if (error) {
(void) zfs_rmdir(ITOZ(dir), dname(dentry), NULL, cr, 0);
remove_inode_hash(ZTOI(zp));
iput(ZTOI(zp));
} else {
d_instantiate(dentry, ZTOI(zp));
}
}
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_rmdir(struct inode *dir, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_rmdir(ITOZ(dir), dname(dentry), NULL, cr, 0);
/*
* For a CI FS we must invalidate the dentry to prevent the
* creation of negative entries.
*/
if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE)
d_invalidate(dentry);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
#ifdef HAVE_USERNS_IOPS_GETATTR
zpl_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
zpl_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else
zpl_getattr_impl(const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#endif
{
int error;
fstrans_cookie_t cookie;
struct inode *ip = path->dentry->d_inode;
znode_t *zp __maybe_unused = ITOZ(ip);
cookie = spl_fstrans_mark();
/*
* XXX query_flags currently ignored.
*/
#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
error = -zfs_getattr_fast(user_ns, request_mask, ip, stat);
#elif (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
error = -zfs_getattr_fast(user_ns, ip, stat);
#else
error = -zfs_getattr_fast(kcred->user_ns, ip, stat);
#endif
#ifdef STATX_BTIME
if (request_mask & STATX_BTIME) {
stat->btime = zp->z_btime;
stat->result_mask |= STATX_BTIME;
}
#endif
#ifdef STATX_ATTR_IMMUTABLE
if (zp->z_pflags & ZFS_IMMUTABLE)
stat->attributes |= STATX_ATTR_IMMUTABLE;
stat->attributes_mask |= STATX_ATTR_IMMUTABLE;
#endif
#ifdef STATX_ATTR_APPEND
if (zp->z_pflags & ZFS_APPENDONLY)
stat->attributes |= STATX_ATTR_APPEND;
stat->attributes_mask |= STATX_ATTR_APPEND;
#endif
#ifdef STATX_ATTR_NODUMP
if (zp->z_pflags & ZFS_NODUMP)
stat->attributes |= STATX_ATTR_NODUMP;
stat->attributes_mask |= STATX_ATTR_NODUMP;
#endif
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
return (error);
}
ZPL_GETATTR_WRAPPER(zpl_getattr);
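/*
 * Illustrative sketch (not part of this change): the STATX_BTIME and
 * STATX_ATTR_* bits filled in above surface through statx(2) (glibc
 * 2.28+). The path argument is whatever file is being inspected.
 */
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

void
statx_example(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx) != 0)
		return;
	if (stx.stx_mask & STATX_BTIME)
		printf("born: %lld\n", (long long)stx.stx_btime.tv_sec);
	if (stx.stx_attributes & STATX_ATTR_IMMUTABLE)
		printf("immutable\n");
}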
static int
#ifdef HAVE_USERNS_IOPS_SETATTR
zpl_setattr(struct user_namespace *user_ns, struct dentry *dentry,
struct iattr *ia)
#elif defined(HAVE_IDMAP_IOPS_SETATTR)
zpl_setattr(struct mnt_idmap *user_ns, struct dentry *dentry,
struct iattr *ia)
#else
zpl_setattr(struct dentry *dentry, struct iattr *ia)
#endif
{
struct inode *ip = dentry->d_inode;
cred_t *cr = CRED();
vattr_t *vap;
int error;
fstrans_cookie_t cookie;
#ifdef HAVE_SETATTR_PREPARE_USERNS
error = zpl_setattr_prepare(user_ns, dentry, ia);
#elif defined(HAVE_SETATTR_PREPARE_IDMAP)
error = zpl_setattr_prepare(user_ns, dentry, ia);
#else
error = zpl_setattr_prepare(zfs_init_idmap, dentry, ia);
#endif
if (error)
return (error);
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
vap->va_mask = ia->ia_valid & ATTR_IATTR_MASK;
vap->va_mode = ia->ia_mode;
if (ia->ia_valid & ATTR_UID)
#ifdef HAVE_IATTR_VFSID
vap->va_uid = zfs_vfsuid_to_uid(user_ns, zfs_i_user_ns(ip),
__vfsuid_val(ia->ia_vfsuid));
#else
vap->va_uid = KUID_TO_SUID(ia->ia_uid);
#endif
if (ia->ia_valid & ATTR_GID)
#ifdef HAVE_IATTR_VFSID
vap->va_gid = zfs_vfsgid_to_gid(user_ns, zfs_i_user_ns(ip),
__vfsgid_val(ia->ia_vfsgid));
#else
vap->va_gid = KGID_TO_SGID(ia->ia_gid);
#endif
vap->va_size = ia->ia_size;
vap->va_atime = ia->ia_atime;
vap->va_mtime = ia->ia_mtime;
vap->va_ctime = ia->ia_ctime;
if (vap->va_mask & ATTR_ATIME)
- ip->i_atime = zpl_inode_timestamp_truncate(ia->ia_atime, ip);
+ zpl_inode_set_atime_to_ts(ip,
+ zpl_inode_timestamp_truncate(ia->ia_atime, ip));
cookie = spl_fstrans_mark();
#ifdef HAVE_USERNS_IOPS_SETATTR
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, user_ns);
#elif defined(HAVE_IDMAP_IOPS_SETATTR)
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, user_ns);
#else
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, zfs_init_idmap);
#endif
if (!error && (ia->ia_valid & ATTR_MODE))
error = zpl_chmod_acl(ip);
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
#ifdef HAVE_IOPS_RENAME_USERNS
zpl_rename2(struct user_namespace *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int rflags)
#elif defined(HAVE_IOPS_RENAME_IDMAP)
zpl_rename2(struct mnt_idmap *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int rflags)
#else
zpl_rename2(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry, unsigned int rflags)
#endif
{
cred_t *cr = CRED();
vattr_t *wo_vap = NULL;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_IOPS_RENAME_USERNS) || defined(HAVE_IOPS_RENAME_IDMAP))
zidmap_t *user_ns = kcred->user_ns;
#endif
crhold(cr);
if (rflags & RENAME_WHITEOUT) {
wo_vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(wo_vap, sdip, S_IFCHR, cr, user_ns);
wo_vap->va_rdev = makedevice(0, 0);
}
cookie = spl_fstrans_mark();
error = -zfs_rename(ITOZ(sdip), dname(sdentry), ITOZ(tdip),
dname(tdentry), cr, 0, rflags, wo_vap, user_ns);
spl_fstrans_unmark(cookie);
if (wo_vap)
kmem_free(wo_vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_IOPS_RENAME_USERNS) && \
!defined(HAVE_RENAME_WANTS_FLAGS) && \
!defined(HAVE_RENAME2) && \
!defined(HAVE_IOPS_RENAME_IDMAP)
static int
zpl_rename(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry)
{
return (zpl_rename2(sdip, sdentry, tdip, tdentry, 0));
}
#endif
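/*
 * Illustrative sketch (not part of this change): the wo_vap handling
 * above services renameat2(2) with RENAME_WHITEOUT, which atomically
 * replaces the source name with a 0/0 character device ("whiteout"),
 * as overlayfs does. Paths are hypothetical; this needs CAP_MKNOD and
 * glibc 2.28+ for the renameat2() wrapper.
 */
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int
whiteout_example(void)
{
	if (renameat2(AT_FDCWD, "/tank/fs/old", AT_FDCWD, "/tank/fs/new",
	    RENAME_WHITEOUT) != 0) {
		perror("renameat2");
		return (1);
	}
	return (0);
}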
static int
#ifdef HAVE_IOPS_SYMLINK_USERNS
zpl_symlink(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, const char *name)
#elif defined(HAVE_IOPS_SYMLINK_IDMAP)
zpl_symlink(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, const char *name)
#else
zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name)
#endif
{
cred_t *cr = CRED();
vattr_t *vap;
znode_t *zp;
int error;
fstrans_cookie_t cookie;
#if !(defined(HAVE_IOPS_SYMLINK_USERNS) || defined(HAVE_IOPS_SYMLINK_IDMAP))
zidmap_t *user_ns = kcred->user_ns;
#endif
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
zpl_vap_init(vap, dir, S_IFLNK | S_IRWXUGO, cr, user_ns);
cookie = spl_fstrans_mark();
error = -zfs_symlink(ITOZ(dir), dname(dentry), vap,
(char *)name, &zp, cr, 0, user_ns);
if (error == 0) {
error = zpl_xattr_security_init(ZTOI(zp), dir, &dentry->d_name);
if (error) {
(void) zfs_remove(ITOZ(dir), dname(dentry), cr, 0);
remove_inode_hash(ZTOI(zp));
iput(ZTOI(zp));
} else {
d_instantiate(dentry, ZTOI(zp));
}
}
spl_fstrans_unmark(cookie);
kmem_free(vap, sizeof (vattr_t));
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if defined(HAVE_PUT_LINK_COOKIE)
static void
zpl_put_link(struct inode *unused, void *cookie)
{
kmem_free(cookie, MAXPATHLEN);
}
#elif defined(HAVE_PUT_LINK_NAMEIDATA)
static void
zpl_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
const char *link = nd_get_link(nd);
if (!IS_ERR(link))
kmem_free(link, MAXPATHLEN);
}
#elif defined(HAVE_PUT_LINK_DELAYED)
static void
zpl_put_link(void *ptr)
{
kmem_free(ptr, MAXPATHLEN);
}
#endif
static int
zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
int error;
crhold(cr);
*link = NULL;
struct iovec iov;
iov.iov_len = MAXPATHLEN;
iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0);
cookie = spl_fstrans_mark();
error = -zfs_readlink(ip, &uio, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error)
kmem_free(iov.iov_base, MAXPATHLEN);
else
*link = iov.iov_base;
return (error);
}
#if defined(HAVE_GET_LINK_DELAYED)
static const char *
zpl_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
char *link = NULL;
int error;
if (!dentry)
return (ERR_PTR(-ECHILD));
error = zpl_get_link_common(dentry, inode, &link);
if (error)
return (ERR_PTR(error));
set_delayed_call(done, zpl_put_link, link);
return (link);
}
#elif defined(HAVE_GET_LINK_COOKIE)
static const char *
zpl_get_link(struct dentry *dentry, struct inode *inode, void **cookie)
{
char *link = NULL;
int error;
if (!dentry)
return (ERR_PTR(-ECHILD));
error = zpl_get_link_common(dentry, inode, &link);
if (error)
return (ERR_PTR(error));
return (*cookie = link);
}
#elif defined(HAVE_FOLLOW_LINK_COOKIE)
static const char *
zpl_follow_link(struct dentry *dentry, void **cookie)
{
char *link = NULL;
int error;
error = zpl_get_link_common(dentry, dentry->d_inode, &link);
if (error)
return (ERR_PTR(error));
return (*cookie = link);
}
#elif defined(HAVE_FOLLOW_LINK_NAMEIDATA)
static void *
zpl_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char *link = NULL;
int error;
error = zpl_get_link_common(dentry, dentry->d_inode, &link);
if (error)
nd_set_link(nd, ERR_PTR(error));
else
nd_set_link(nd, link);
return (NULL);
}
#endif
static int
zpl_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
cred_t *cr = CRED();
struct inode *ip = old_dentry->d_inode;
int error;
fstrans_cookie_t cookie;
if (ip->i_nlink >= ZFS_LINK_MAX)
return (-EMLINK);
crhold(cr);
zpl_inode_set_ctime_to_ts(ip, current_time(ip));
/* Must have an existing ref, so igrab() cannot return NULL */
VERIFY3P(igrab(ip), !=, NULL);
cookie = spl_fstrans_mark();
error = -zfs_link(ITOZ(dir), ITOZ(ip), dname(dentry), cr, 0);
if (error) {
iput(ip);
goto out;
}
d_instantiate(dentry, ip);
out:
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
const struct inode_operations zpl_inode_operations = {
.setattr = zpl_setattr,
.getattr = zpl_getattr,
#ifdef HAVE_GENERIC_SETXATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
#endif
.listxattr = zpl_xattr_list,
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
.set_acl = zpl_set_acl,
#endif /* HAVE_SET_ACL */
#if defined(HAVE_GET_INODE_ACL)
.get_inode_acl = zpl_get_acl,
#else
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
};
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
const struct inode_operations_wrapper zpl_dir_inode_operations = {
.ops = {
#else
const struct inode_operations zpl_dir_inode_operations = {
#endif
.create = zpl_create,
.lookup = zpl_lookup,
.link = zpl_link,
.unlink = zpl_unlink,
.symlink = zpl_symlink,
.mkdir = zpl_mkdir,
.rmdir = zpl_rmdir,
.mknod = zpl_mknod,
#ifdef HAVE_RENAME2
.rename2 = zpl_rename2,
#elif defined(HAVE_RENAME_WANTS_FLAGS) || defined(HAVE_IOPS_RENAME_USERNS)
.rename = zpl_rename2,
#elif defined(HAVE_IOPS_RENAME_IDMAP)
.rename = zpl_rename2,
#else
.rename = zpl_rename,
#endif
#ifdef HAVE_TMPFILE
.tmpfile = zpl_tmpfile,
#endif
.setattr = zpl_setattr,
.getattr = zpl_getattr,
#ifdef HAVE_GENERIC_SETXATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
#endif
.listxattr = zpl_xattr_list,
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
.set_acl = zpl_set_acl,
#endif /* HAVE_SET_ACL */
#if defined(HAVE_GET_INODE_ACL)
.get_inode_acl = zpl_get_acl,
#else
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
},
.rename2 = zpl_rename2,
#endif
};
const struct inode_operations zpl_symlink_inode_operations = {
#ifdef HAVE_GENERIC_READLINK
.readlink = generic_readlink,
#endif
#if defined(HAVE_GET_LINK_DELAYED) || defined(HAVE_GET_LINK_COOKIE)
.get_link = zpl_get_link,
#elif defined(HAVE_FOLLOW_LINK_COOKIE) || defined(HAVE_FOLLOW_LINK_NAMEIDATA)
.follow_link = zpl_follow_link,
#endif
#if defined(HAVE_PUT_LINK_COOKIE) || defined(HAVE_PUT_LINK_NAMEIDATA)
.put_link = zpl_put_link,
#endif
.setattr = zpl_setattr,
.getattr = zpl_getattr,
#ifdef HAVE_GENERIC_SETXATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
#endif
.listxattr = zpl_xattr_list,
};
const struct inode_operations zpl_special_inode_operations = {
.setattr = zpl_setattr,
.getattr = zpl_getattr,
#ifdef HAVE_GENERIC_SETXATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
#endif
.listxattr = zpl_xattr_list,
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
.set_acl = zpl_set_acl,
#endif /* HAVE_SET_ACL */
#if defined(HAVE_GET_INODE_ACL)
.get_inode_acl = zpl_get_acl,
#else
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
};
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index f94ce69fb9e2..8562e989738d 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -1,1646 +1,1648 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
struct request *rq, boolean_t force_sync);
static unsigned int zvol_major = ZVOL_MAJOR;
static unsigned int zvol_request_sync = 0;
static unsigned int zvol_prefetch_bytes = (128 * 1024);
static unsigned long zvol_max_discard_blocks = 16384;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
static unsigned int zvol_open_timeout_ms = 1000;
#endif
static unsigned int zvol_threads = 0;
#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_threads = 0;
static unsigned int zvol_blk_mq_actual_threads;
static boolean_t zvol_use_blk_mq = B_FALSE;
/*
* The maximum number of volblocksize blocks to process per thread. Typically,
 * write heavy workloads perform better with higher values here, and read
 * heavy workloads perform better with lower values, but that's not a hard
* and fast rule. It's basically a knob to tune between "less overhead with
* less parallelism" and "more overhead, but more parallelism".
*
 * '8' was chosen as a reasonable, balanced default based on sequential
* read and write tests to a zvol in an NVMe pool (with 16 CPUs).
*/
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
#endif
#ifndef BLKDEV_DEFAULT_RQ
/* BLKDEV_MAX_RQ was renamed to BLKDEV_DEFAULT_RQ in the 5.16 kernel */
#define BLKDEV_DEFAULT_RQ BLKDEV_MAX_RQ
#endif
/*
* Finalize our BIO or request.
*/
#ifdef HAVE_BLK_MQ
#define END_IO(zv, bio, rq, error) do { \
if (bio) { \
BIO_END_IO(bio, error); \
} else { \
blk_mq_end_request(rq, errno_to_bi_status(error)); \
} \
} while (0)
#else
#define END_IO(zv, bio, rq, error) BIO_END_IO(bio, error)
#endif
#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
static unsigned int zvol_actual_blk_mq_queue_depth;
#endif
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
#ifdef HAVE_BLK_MQ
struct blk_mq_tag_set tag_set;
#endif
/* Set from the global 'zvol_use_blk_mq' at zvol load */
boolean_t use_blk_mq;
};
static taskq_t *zvol_taskq;
static struct ida zvol_ida;
typedef struct zv_request_stack {
zvol_state_t *zv;
struct bio *bio;
struct request *rq;
} zv_request_t;
typedef struct zv_work {
struct request *rq;
struct work_struct work;
} zv_work_t;
typedef struct zv_request_task {
zv_request_t zvr;
taskq_ent_t ent;
} zv_request_task_t;
static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
zv_request_task_t *task;
task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
taskq_init_ent(&task->ent);
task->zvr = zvr;
return (task);
}
static void
zv_request_task_free(zv_request_task_t *task)
{
kmem_free(task, sizeof (*task));
}
#ifdef HAVE_BLK_MQ
/*
* This is called when a new block multiqueue request comes in. A request
* contains one or more BIOs.
*/
static blk_status_t zvol_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
zvol_state_t *zv = rq->q->queuedata;
/* Tell the kernel that we are starting to process this request */
blk_mq_start_request(rq);
if (blk_rq_is_passthrough(rq)) {
		/* Skip non-filesystem requests */
blk_mq_end_request(rq, BLK_STS_IOERR);
return (BLK_STS_IOERR);
}
zvol_request_impl(zv, NULL, rq, 0);
/* Acknowledge to the kernel that we got this request */
return (BLK_STS_OK);
}
static struct blk_mq_ops zvol_blk_mq_queue_ops = {
.queue_rq = zvol_mq_queue_rq,
};
/* Initialize our blk-mq struct */
static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
{
struct zvol_state_os *zso = zv->zv_zso;
memset(&zso->tag_set, 0, sizeof (zso->tag_set));
/* Initialize tag set. */
zso->tag_set.ops = &zvol_blk_mq_queue_ops;
zso->tag_set.nr_hw_queues = zvol_blk_mq_actual_threads;
zso->tag_set.queue_depth = zvol_actual_blk_mq_queue_depth;
zso->tag_set.numa_node = NUMA_NO_NODE;
zso->tag_set.cmd_size = 0;
/*
* We need BLK_MQ_F_BLOCKING here since we do blocking calls in
* zvol_request_impl()
*/
zso->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
zso->tag_set.driver_data = zv;
return (blk_mq_alloc_tag_set(&zso->tag_set));
}
#endif /* HAVE_BLK_MQ */
/*
* Given a path, return TRUE if path is a ZVOL.
*/
boolean_t
zvol_os_is_zvol(const char *path)
{
dev_t dev = 0;
if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
static void
zvol_write(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
int error = 0;
zfs_uio_t uio;
zvol_state_t *zv = zvr->zv;
struct request_queue *q;
struct gendisk *disk;
unsigned long start_time = 0;
boolean_t acct = B_FALSE;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
q = zv->zv_zso->zvo_queue;
disk = zv->zv_zso->zvo_disk;
/* bio marked as FLUSH need to flush before write */
if (io_is_flush(bio, rq))
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/* Some requests are just for flush and nothing else. */
if (io_size(bio, rq) == 0) {
rw_exit(&zv->zv_suspend_lock);
END_IO(zv, bio, rq, 0);
return;
}
zfs_uio_bvec_init(&uio, bio, rq);
ssize_t start_resid = uio.uio_resid;
/*
* With use_blk_mq, accounting is done by blk_mq_start_request()
* and blk_mq_end_request(), so we can skip it here.
*/
if (bio) {
acct = blk_queue_io_stat(q);
if (acct) {
start_time = blk_generic_start_io_acct(q, disk, WRITE,
bio);
}
}
boolean_t sync =
io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_WRITER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
uint64_t off = uio.uio_loffset;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
/* This will only fail for ENOSPC */
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0) {
zvol_log_write(zv, tx, off, bytes, sync);
}
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - uio.uio_resid;
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
}
END_IO(zv, bio, rq, -error);
}
static void
zvol_write_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_write(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_discard(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
zvol_state_t *zv = zvr->zv;
uint64_t start = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
uint64_t end = start + size;
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
unsigned long start_time = 0;
boolean_t acct = B_FALSE;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
if (bio) {
acct = blk_queue_io_stat(q);
if (acct) {
start_time = blk_generic_start_io_acct(q, disk, WRITE,
bio);
}
}
sync = io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
if (end > zv->zv_volsize) {
error = SET_ERROR(EIO);
goto unlock;
}
/*
* Align the request to volume block boundaries when a secure erase is
* not required. This will prevent dnode_free_range() from zeroing out
* the unaligned parts which is slow (read-modify-write) and useless
* since we are not freeing any space by doing so.
*/
if (!io_is_secure_erase(bio, rq)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
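	/*
	 * For example, with volblocksize=8K a discard of [4K, 20K) is
	 * trimmed to [8K, 16K): only whole blocks are freed, and the
	 * misaligned head and tail are left untouched.
	 */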
if (start >= end)
goto unlock;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_rangelock_exit(lr);
if (error == 0 && sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, WRITE, bio,
start_time);
}
END_IO(zv, bio, rq, -error);
}
static void
zvol_discard_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_discard(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_read(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
struct request *rq = zvr->rq;
int error = 0;
zfs_uio_t uio;
boolean_t acct = B_FALSE;
zvol_state_t *zv = zvr->zv;
struct request_queue *q;
struct gendisk *disk;
unsigned long start_time = 0;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
zfs_uio_bvec_init(&uio, bio, rq);
q = zv->zv_zso->zvo_queue;
disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
/*
* When blk-mq is being used, accounting is done by
* blk_mq_start_request() and blk_mq_end_request().
*/
if (bio) {
acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, READ,
bio);
}
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
/* don't read past the end */
if (bytes > volsize - uio.uio_loffset)
bytes = volsize - uio.uio_loffset;
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - uio.uio_resid;
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
if (bio && acct) {
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
}
END_IO(zv, bio, rq, -error);
}
static void
zvol_read_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_read(&task->zvr);
zv_request_task_free(task);
}
/*
* Process a BIO or request
*
 * Either 'bio' or 'rq' should be set depending on whether we are
 * processing a bio or a request (both should not be set).
*
* force_sync: Set to 0 to defer processing to a background taskq
* Set to 1 to process data synchronously
*/
static void
zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
boolean_t force_sync)
{
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
int rw = io_data_dir(bio, rq);
if (zvol_request_sync)
force_sync = 1;
zv_request_t zvr = {
.zv = zv,
.bio = bio,
.rq = rq,
};
if (io_has_data(bio, rq) && offset + size > zv->zv_volsize) {
printk(KERN_INFO "%s: bad access: offset=%llu, size=%lu\n",
zv->zv_zso->zvo_disk->disk_name,
(long long unsigned)offset,
(long unsigned)size);
END_IO(zv, bio, rq, -SET_ERROR(EIO));
goto out;
}
zv_request_task_t *task;
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
END_IO(zv, bio, rq, -SET_ERROR(EROFS));
goto out;
}
/*
* Prevents the zvol from being suspended, or the ZIL being
* concurrently opened. Will be released after the i/o
* completes.
*/
rw_enter(&zv->zv_suspend_lock, RW_READER);
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data, &zv->zv_kstat.dk_zil_sums);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_create_minor */
VERIFY0((zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED));
}
rw_downgrade(&zv->zv_suspend_lock);
}
/*
* We don't want this thread to be blocked waiting for i/o to
* complete, so we instead wait from a taskq callback. The
* i/o may be a ZIL write (via zil_commit()), or a read of an
* indirect block, or a read of a data block (if this is a
* partial-block write). We will indicate that the i/o is
* complete by calling END_IO() from the taskq callback.
*
* This design allows the calling thread to continue and
* initiate more concurrent operations by calling
* zvol_request() again. There are typically only a small
* number of threads available to call zvol_request() (e.g.
* one per iSCSI target), so keeping the latency of
* zvol_request() low is important for performance.
*
* The zvol_request_sync module parameter allows this
* behavior to be altered, for performance evaluation
* purposes. If the callback blocks, setting
* zvol_request_sync=1 will result in much worse performance.
*
* We can have up to zvol_threads concurrent i/o's being
* processed for all zvols on the system. This is typically
* a vast improvement over the zvol_request_sync=1 behavior
* of one i/o at a time per zvol. However, an even better
* design would be for zvol_request() to initiate the zio
* directly, and then be notified by the zio_done callback,
* which would call END_IO(). Unfortunately, the DMU/ZIL
* interfaces lack this functionality (they block waiting for
* the i/o to complete).
*/
if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
if (force_sync) {
zvol_discard(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_discard_task, task, 0, &task->ent);
}
} else {
if (force_sync) {
zvol_write(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_write_task, task, 0, &task->ent);
}
}
} else {
/*
* The SCST driver, and possibly others, may issue READ I/Os
* with a length of zero bytes. These empty I/Os contain no
* data and require no additional handling.
*/
if (size == 0) {
END_IO(zv, bio, rq, 0);
goto out;
}
rw_enter(&zv->zv_suspend_lock, RW_READER);
/* See comment in WRITE case above. */
if (force_sync) {
zvol_read(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_read_task, task, 0, &task->ent);
}
}
out:
spl_fstrans_unmark(cookie);
}
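/*
 * The deferred taskq path above is the default. For performance
 * evaluation it can be bypassed globally with the zvol_request_sync
 * module parameter, e.g. "modprobe zfs zvol_request_sync=1", or at
 * runtime via /sys/module/zfs/parameters/zvol_request_sync.
 */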
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
zvol_state_t *zv = q->queuedata;
zvol_request_impl(zv, bio, NULL, 0);
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
return (BLK_QC_T_NONE);
#endif
}
static int
#ifdef HAVE_BLK_MODE_T
zvol_open(struct gendisk *disk, blk_mode_t flag)
#else
zvol_open(struct block_device *bdev, fmode_t flag)
#endif
{
zvol_state_t *zv;
int error = 0;
boolean_t drop_suspend = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
hrtime_t start = gethrtime();
retry:
#endif
rw_enter(&zvol_state_lock, RW_READER);
/*
* Obtain a copy of private_data under the zvol_state_lock to make
* sure that either the result of zvol free code path setting
* disk->private_data to NULL is observed, or zvol_os_free()
* is not called on this zv because of the positive zv_open_count.
*/
#ifdef HAVE_BLK_MODE_T
zv = disk->private_data;
#else
zv = bdev->bd_disk->private_data;
#endif
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(-ENXIO));
}
mutex_enter(&zv->zv_state_lock);
/*
* Make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
} else {
drop_suspend = B_TRUE;
}
} else {
drop_suspend = B_TRUE;
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
boolean_t drop_namespace = B_FALSE;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
/*
* In all other call paths the spa_namespace_lock is taken
* before the bdev->bd_mutex lock. However, on open(2)
* the __blkdev_get() function calls fops->open() with the
* bdev->bd_mutex lock held. This can result in a deadlock
* when zvols from one pool are used as vdevs in another.
*
* To prevent a lock inversion deadlock we preemptively
* take the spa_namespace_lock. Normally the lock will not
* be contended and this is safe because spa_open_common()
* handles the case where the caller already holds the
* spa_namespace_lock.
*
		 * When the lock cannot be acquired after multiple retries
* this must be the vdev on zvol deadlock case and we have
* no choice but to return an error. For 5.12 and older
* kernels returning -ERESTARTSYS will result in the
* bdev->bd_mutex being dropped, then reacquired, and
* fops->open() being called again. This process can be
* repeated safely until both locks are acquired. For 5.13
* and newer the -ERESTARTSYS retry logic was removed from
* the kernel so the only option is to return the error for
* the caller to handle it.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
schedule();
return (SET_ERROR(-ERESTARTSYS));
#else
if ((gethrtime() - start) > timeout)
return (SET_ERROR(-ERESTARTSYS));
schedule_timeout(MSEC_TO_TICK(10));
goto retry;
#endif
} else {
drop_namespace = B_TRUE;
}
}
error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
}
if (error == 0) {
if ((blk_mode_is_open_write(flag)) &&
(zv->zv_flags & ZVOL_RDONLY)) {
if (zv->zv_open_count == 0)
zvol_last_close(zv);
error = SET_ERROR(-EROFS);
} else {
zv->zv_open_count++;
}
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
if (error == 0)
#ifdef HAVE_BLK_MODE_T
disk_check_media_change(disk);
#else
zfs_check_media_change(bdev);
#endif
return (error);
}
static void
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
zvol_release(struct gendisk *disk)
#else
zvol_release(struct gendisk *disk, fmode_t unused)
#endif
{
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
(void) unused;
#endif
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, RW_READER);
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zv->zv_open_count--;
if (zv->zv_open_count == 0) {
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
case BLKFLSBUF:
#ifdef HAVE_FSYNC_BDEV
fsync_bdev(bdev);
#elif defined(HAVE_SYNC_BLOCKDEV)
sync_blockdev(bdev);
#else
#error "Neither fsync_bdev() nor sync_blockdev() found"
#endif
invalidate_bdev(bdev);
rw_enter(&zv->zv_suspend_lock, RW_READER);
if (!(zv->zv_flags & ZVOL_RDONLY))
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
rw_exit(&zv->zv_suspend_lock);
break;
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
break;
default:
error = -ENOTTY;
break;
}
return (SET_ERROR(error));
}
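/*
 * Illustrative sketch (not part of this change): BLKZNAME above is how
 * userspace maps a /dev/zdN node back to its dataset name; the udev
 * helper zvol_id uses it the same way. BLKZNAME is assumed to come
 * from the ZFS Linux headers, and the device path is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
zvol_name_example(void)
{
	char name[256];		/* MAXNAMELEN bytes are copied out */
	int fd = open("/dev/zd0", O_RDONLY);

	if (fd < 0)
		return (1);
	if (ioctl(fd, BLKZNAME, name) == 0)
		printf("%s\n", name);	/* e.g. "tank/vol" */
	close(fd);
	return (0);
}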
#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define zvol_compat_ioctl NULL
#endif
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
zv->zv_changed = 0;
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
zv->zv_volsize >> SECTOR_BITS);
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (0);
}
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
struct gendisk *disk = zv->zv_zso->zvo_disk;
#if defined(HAVE_REVALIDATE_DISK_SIZE)
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
revalidate_disk(disk);
#else
zvol_revalidate_disk(disk);
#endif
return (0);
}
void
zvol_os_clear_private(zvol_state_t *zv)
{
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinder count reasonable.
*/
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
if (sectors > 2048) {
geo->heads = 16;
geo->sectors = 63;
} else {
geo->heads = 2;
geo->sectors = 4;
}
geo->start = 0;
geo->cylinders = sectors / (geo->heads * geo->sectors);
return (0);
}
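/*
 * Illustrative sketch (not part of this change): the synthetic geometry
 * above is what HDIO_GETGEO reports. For a 1 GiB zvol (2097152 sectors)
 * that works out to heads=16, sectors=63, and
 * cylinders = 2097152 / (16 * 63) = 2080. The device path is
 * hypothetical.
 */
#include <fcntl.h>
#include <linux/hdreg.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

void
geo_example(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/zd0", O_RDONLY);

	if (fd < 0)
		return;
	if (ioctl(fd, HDIO_GETGEO, &geo) == 0)
		printf("C/H/S %u/%u/%u\n", geo.cylinders, geo.heads,
		    geo.sectors);
	close(fd);
}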
/*
* Why have two separate block_device_operations structs?
*
* Normally we'd just have one, and assign 'submit_bio' as needed. However,
* it's possible the user's kernel is built with CONSTIFY_PLUGIN, meaning we
* can't just change submit_bio dynamically at runtime. So just create two
* separate structs to get around this.
*/
static const struct block_device_operations zvol_ops_blk_mq = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
};
static const struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
.submit_bio = zvol_submit_bio,
#endif
};
static int
zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
{
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
#if defined(HAVE_BLK_ALLOC_DISK)
zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
if (zso->zvo_disk == NULL)
return (1);
zso->zvo_disk->minors = ZVOL_MINORS;
zso->zvo_queue = zso->zvo_disk->queue;
#else
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
return (1);
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
return (1);
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_BLK_ALLOC_DISK */
#else
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
return (1);
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
return (1);
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
return (0);
}
static int
zvol_alloc_blk_mq(zvol_state_t *zv)
{
#ifdef HAVE_BLK_MQ
struct zvol_state_os *zso = zv->zv_zso;
/* Allocate our blk-mq tag_set */
if (zvol_blk_mq_alloc_tag_set(zv) != 0)
return (1);
#if defined(HAVE_BLK_ALLOC_DISK)
zso->zvo_disk = blk_mq_alloc_disk(&zso->tag_set, zv);
if (zso->zvo_disk == NULL) {
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
zso->zvo_queue = zso->zvo_disk->queue;
zso->zvo_disk->minors = ZVOL_MINORS;
#else
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
/* Allocate queue */
zso->zvo_queue = blk_mq_init_queue(&zso->tag_set);
if (IS_ERR(zso->zvo_queue)) {
blk_mq_free_tag_set(&zso->tag_set);
return (1);
}
/* Our queue is now created, assign it to our disk */
zso->zvo_disk->queue = zso->zvo_queue;
#endif
#endif
return (0);
}
/*
* Allocate memory for a new zvol_state_t and setup the required
* request queue and generic disk structures for the block device.
*/
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
zvol_state_t *zv;
struct zvol_state_os *zso;
uint64_t volmode;
int ret;
if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
return (NULL);
if (volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
if (volmode == ZFS_VOLMODE_NONE)
return (NULL);
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
zv->zv_volmode = volmode;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef HAVE_BLK_MQ
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
#endif
/*
* The block layer has 3 interfaces for getting BIOs:
*
* 1. blk-mq request queues (new)
* 2. submit_bio() (oldest)
* 3. regular request queues (old).
*
* Each of those interfaces has two permutations:
*
* a) We have blk_alloc_disk()/blk_mq_alloc_disk(), which allocates
* both the disk and its queue (5.14 kernel or newer)
*
* b) We don't have blk_*alloc_disk(), and have to allocate the
* disk and the queue separately. (5.13 kernel or older)
*/
	/* Check the allocation before touching zvo_disk (NULL on failure). */
	if (zv->zv_zso->use_blk_mq) {
		ret = zvol_alloc_blk_mq(zv);
		if (ret != 0)
			goto out_kmem;
		zso->zvo_disk->fops = &zvol_ops_blk_mq;
	} else {
		ret = zvol_alloc_non_blk_mq(zso);
		if (ret != 0)
			goto out_kmem;
		zso->zvo_disk->fops = &zvol_ops;
	}
blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
/* Limit read-ahead to a single page to prevent over-prefetching. */
blk_queue_set_read_ahead(zso->zvo_queue, 1);
if (!zv->zv_zso->use_blk_mq) {
/* Disable write merging in favor of the ZIO pipeline. */
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
}
/* Enable /proc/diskstats */
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
zso->zvo_queue->queuedata = zv;
zso->zvo_dev = dev;
zv->zv_open_count = 0;
strlcpy(zv->zv_name, name, MAXNAMELEN);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zso->zvo_disk->major = zvol_major;
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
/*
* Setting ZFS_VOLMODE_DEV disables partitioning on ZVOL devices.
* This is accomplished by limiting the number of minors for the
* device to one and explicitly disabling partition scanning.
*/
if (volmode == ZFS_VOLMODE_DEV) {
zso->zvo_disk->minors = 1;
zso->zvo_disk->flags &= ~ZFS_GENHD_FL_EXT_DEVT;
zso->zvo_disk->flags |= ZFS_GENHD_FL_NO_PART;
}
zso->zvo_disk->first_minor = (dev & MINORMASK);
zso->zvo_disk->private_data = zv;
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
ZVOL_DEV_NAME, (dev & MINORMASK));
return (zv);
out_kmem:
kmem_free(zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
return (NULL);
}
/*
* Cleanup then free a zvol_state_t which was created by zvol_alloc().
* At this time, the structure is not opened by anyone, is taken off
* the zvol_state_list, and has its private data set to NULL.
* The zvol_state_lock is dropped.
*
* This function may take many milliseconds to complete (e.g. we've seen
* it take over 256ms), due to the calls to "blk_cleanup_queue" and
* "del_gendisk". Thus, consumers need to be careful to account for this
* latency when calling this function.
*/
void
zvol_os_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
del_gendisk(zv->zv_zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
defined(HAVE_BLK_ALLOC_DISK)
#if defined(HAVE_BLK_CLEANUP_DISK)
blk_cleanup_disk(zv->zv_zso->zvo_disk);
#else
put_disk(zv->zv_zso->zvo_disk);
#endif
#else
blk_cleanup_queue(zv->zv_zso->zvo_queue);
put_disk(zv->zv_zso->zvo_disk);
#endif
#ifdef HAVE_BLK_MQ
if (zv->zv_zso->use_blk_mq)
blk_mq_free_tag_set(&zv->zv_zso->tag_set);
#endif
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
void
zvol_wait_close(zvol_state_t *zv)
{
}
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
* device is live and ready for use.
*/
int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t len;
unsigned minor = 0;
int error = 0;
int idx;
uint64_t hash = zvol_name_hash(name);
bool replayed_zil = B_FALSE;
if (zvol_inhibit_dev)
return (0);
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
if (idx < 0)
return (SET_ERROR(-idx));
minor = idx << ZVOL_MINOR_BITS;
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
if (zv) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
ida_simple_remove(&zvol_ida, idx);
return (SET_ERROR(EEXIST));
}
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
zv->zv_hash = hash;
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
(DMU_MAX_ACCESS / 4) >> 9);
if (zv->zv_zso->use_blk_mq) {
/*
* IO requests can be really big (1MB). When an IO request
* comes in, it is passed off to zvol_read() or zvol_write()
* in a new thread, where it is chunked up into 'volblocksize'
* sized pieces and processed. So for example, if the request
* is a 1MB write and your volblocksize is 128k, one zvol_write
* thread will take that request and sequentially do ten 128k
		 * IOs. This is because the thread needs to lock each
		 * volblocksize-sized block. So you might be wondering:
* "instead of passing the whole 1MB request to one thread,
* why not pass ten individual 128k chunks to ten threads and
* process the whole write in parallel?" The short answer is
* that there's a sweet spot number of chunks that balances
* the greater parallelism with the added overhead of more
* threads. The sweet spot can be different depending on if you
* have a read or write heavy workload. Writes typically want
* high chunk counts while reads typically want lower ones. On
* a test pool with 6 NVMe drives in a 3x 2-disk mirror
* configuration, with volblocksize=8k, the sweet spot for good
* sequential reads and writes was at 8 chunks.
*/
/*
* Below we tell the kernel how big we want our requests
* to be. You would think that blk_queue_io_opt() would be
* used to do this since it is used to "set optimal request
* size for the queue", but that doesn't seem to do
* anything - the kernel still gives you huge requests
* with tons of little PAGE_SIZE segments contained within it.
*
* Knowing that the kernel will just give you PAGE_SIZE segments
* no matter what, you can say "ok, I want PAGE_SIZE byte
* segments, and I want 'N' of them per request", where N is
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
PAGE_SIZE);
blk_queue_max_segments(zv->zv_zso->zvo_queue,
(zv->zv_volblocksize * chunks) / PAGE_SIZE);
} else {
/*
* Special case: zvol_blk_mq_blocks_per_thread = 0
* Max everything out.
*/
blk_queue_max_segments(zv->zv_zso->zvo_queue,
UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
UINT_MAX);
}
#endif
} else {
blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
}
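	/*
	 * For example, with volblocksize=128K, the default of 8 blocks
	 * per thread, and 4K pages, the blk-mq path above advertises
	 * (128K * 8) / 4K = 256 PAGE_SIZE segments, i.e. 1 MiB requests.
	 */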
blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
#ifdef QUEUE_FLAG_DISCARD
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_NONROT
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
else
replayed_zil = zil_replay(os, zv, zvol_replay_vector);
}
if (replayed_zil)
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
/*
* When udev detects the addition of the device it will immediately
* invoke blkid(8) to determine the type of content on the device.
* Prefetching the blocks commonly scanned by blkid(8) will speed
* up this process.
*/
len = MIN(zvol_prefetch_bytes, SPA_MAXBLOCKSIZE);
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
/*
* Keep in mind that once add_disk() is called, the zvol is
* announced to the world, and zvol_open()/zvol_release() can
* be called at any time. Incidentally, add_disk() itself calls
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
* directly as well.
*/
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
rw_exit(&zvol_state_lock);
#ifdef HAVE_ADD_DISK_RET
error = add_disk(zv->zv_zso->zvo_disk);
#else
add_disk(zv->zv_zso->zvo_disk);
#endif
} else {
ida_simple_remove(&zvol_ida, idx);
}
return (error);
}
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
/* move to new hashtable entry */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
/*
* The block device's read-only state is briefly changed, causing
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
* the name change and fixes the symlinks. This does not change
* ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
* changes. This would normally be done using kobject_uevent() but
* that is a GPL-only symbol which is why we need this workaround.
*/
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
+
+ dataset_kstats_rename(&zv->zv_kstat, newname);
}
void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
set_capacity(zv->zv_zso->zvo_disk, capacity);
}
int
zvol_init(void)
{
int error;
/*
* zvol_threads is the module param the user passes in.
*
* zvol_actual_threads is what we use internally, since the user can
* pass zvol_threads = 0 to mean "use all the CPUs" (the default).
*/
static unsigned int zvol_actual_threads;
if (zvol_threads == 0) {
/*
* See dde9380a1 for why 32 was chosen here. This should
* probably be refined to be some multiple of the number
* of CPUs.
*/
zvol_actual_threads = MAX(num_online_cpus(), 32);
} else {
zvol_actual_threads = MIN(MAX(zvol_threads, 1), 1024);
}
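/*
* Worked examples of the clamping above: zvol_threads = 0 on a
* 16-CPU system yields MAX(16, 32) = 32 threads, and on a 64-CPU
* system yields 64; an explicit zvol_threads = 2000 is clamped to
* MIN(MAX(2000, 1), 1024) = 1024.
*/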
error = register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
return (error);
}
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_queue_depth == 0) {
zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
} else {
zvol_actual_blk_mq_queue_depth =
MAX(zvol_blk_mq_queue_depth, BLKDEV_MIN_RQ);
}
if (zvol_blk_mq_threads == 0) {
zvol_blk_mq_actual_threads = num_online_cpus();
} else {
zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
1024);
}
#endif
zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_actual_threads, maxclsyspri,
zvol_actual_threads, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (zvol_taskq == NULL) {
unregister_blkdev(zvol_major, ZVOL_DRIVER);
return (-ENOMEM);
}
zvol_init_impl();
ida_init(&zvol_ida);
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
unregister_blkdev(zvol_major, ZVOL_DRIVER);
taskq_destroy(zvol_taskq);
ida_destroy(&zvol_ida);
}
/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads to handle I/O requests. Set"
"to 0 to use all active CPUs");
module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
#ifdef HAVE_BLK_MQ
module_param(zvol_blk_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
module_param(zvol_use_blk_mq, uint, 0644);
MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");
module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
"Process volblocksize blocks per thread");
#endif
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
module_param(zvol_open_timeout_ms, uint, 0644);
MODULE_PARM_DESC(zvol_open_timeout_ms, "Timeout for ZVOL open retries");
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index 745ee8f02ed4..d982f201c930 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -1,1229 +1,1177 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* ARC buffer data (ABD).
*
* ABDs are an abstract data structure for the ARC which can use two
* different ways of storing the underlying data:
*
* (a) Linear buffer. In this case, all the data in the ABD is stored in one
* contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
*
* +-------------------+
* | ABD (linear) |
* | abd_flags = ... |
* | abd_size = ... | +--------------------------------+
* | abd_buf ------------->| raw buffer of size abd_size |
* +-------------------+ +--------------------------------+
* no abd_chunks
*
* (b) Scattered buffer. In this case, the data in the ABD is split into
* equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
* to the chunks recorded in an array at the end of the ABD structure.
*
* +-------------------+
* | ABD (scattered) |
* | abd_flags = ... |
* | abd_size = ... |
* | abd_offset = 0 | +-----------+
* | abd_chunks[0] ----------------------------->| chunk 0 |
* | abd_chunks[1] ---------------------+ +-----------+
* | ... | | +-----------+
* | abd_chunks[N-1] ---------+ +------->| chunk 1 |
* +-------------------+ | +-----------+
* | ...
* | +-----------+
* +----------------->| chunk N-1 |
* +-----------+
*
* In addition to directly allocating a linear or scattered ABD, it is also
* possible to create an ABD by requesting the "sub-ABD" starting at an offset
* within an existing ABD. In linear buffers this is simple (set abd_buf of
* the new ABD to the starting point within the original raw buffer), but
* scattered ABDs are a little more complex. The new ABD makes a copy of the
* relevant abd_chunks pointers (but not the underlying data). However, to
* provide arbitrary rather than only chunk-aligned starting offsets, it also
* tracks an abd_offset field which represents the starting point of the data
* within the first chunk in abd_chunks. For both linear and scattered ABDs,
* creating an offset ABD marks the original ABD as the offset's parent, and the
* original ABD's abd_children refcount is incremented. This data allows us to
* ensure the root ABD isn't deleted before its children.
*
* Most consumers should never need to know what type of ABD they're using --
* the ABD public API ensures that it's possible to transparently switch from
* using a linear ABD to a scattered one when doing so would be beneficial.
*
* If you need to use the data within an ABD directly, if you know it's linear
* (because you allocated it) you can use abd_to_buf() to access the underlying
* raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
* which will allocate a raw buffer if necessary. Use the abd_return_buf*
* functions to return any raw buffers that are no longer necessary when you're
* done using them.
*
* There are a variety of ABD APIs that implement basic buffer operations:
* compare, copy, read, write, and fill with zeroes. If you need a custom
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
*
* As an additional feature, linear and scatter ABDs can be stitched together
* by using the gang ABD type (abd_alloc_gang()). This allows for
* multiple ABDs to be viewed as a single ABD.
*
* It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
* B_FALSE.
*/
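/*
* A minimal usage sketch of the API described above, for a consumer
* that does not know whether its ABD is linear:
*
* abd_t *abd = abd_alloc(size, B_FALSE);
* void *buf = abd_borrow_buf_copy(abd, size);
* ... read or modify buf directly ...
* abd_return_buf_copy(abd, buf, size);
* abd_free(abd);
*
* abd_borrow_buf_copy() hands back the raw buffer itself when the ABD
* is linear; otherwise it allocates a temporary buffer and copies the
* data in, and abd_return_buf_copy() copies any changes back out.
*/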
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
ASSERT3U(abd->abd_size, >, 0);
ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
} else if (abd_is_gang(abd)) {
uint_t child_sizes = 0;
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT(list_link_active(&cabd->abd_gang_link));
child_sizes += cabd->abd_size;
abd_verify(cabd);
}
ASSERT3U(abd->abd_size, ==, child_sizes);
} else {
ASSERT3U(abd->abd_size, >, 0);
abd_verify_scatter(abd);
}
#endif
}
static void
abd_init_struct(abd_t *abd)
{
list_link_init(&abd->abd_gang_link);
mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
abd->abd_flags = 0;
#ifdef ZFS_DEBUG
zfs_refcount_create(&abd->abd_children);
abd->abd_parent = NULL;
#endif
abd->abd_size = 0;
}
static void
abd_fini_struct(abd_t *abd)
{
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
zfs_refcount_destroy(&abd->abd_children);
#endif
}
abd_t *
abd_alloc_struct(size_t size)
{
abd_t *abd = abd_alloc_struct_impl(size);
abd_init_struct(abd);
abd->abd_flags |= ABD_FLAG_ALLOCD;
return (abd);
}
void
abd_free_struct(abd_t *abd)
{
abd_fini_struct(abd);
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD, along with its own underlying data buffers. Use this if you
* don't care whether the ABD is linear or not.
*/
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
if (abd_size_alloc_linear(size))
return (abd_alloc_linear(size, is_metadata));
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd_t *abd = abd_alloc_struct(size);
abd->abd_flags |= ABD_FLAG_OWNER;
abd->abd_u.abd_scatter.abd_offset = 0;
abd_alloc_chunks(abd, size);
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
abd_update_scatter_stats(abd, ABDSTAT_INCR);
return (abd);
}
/*
* Allocate an ABD that must be linear, along with its own underlying data
* buffer. Only use this when it would be very annoying to write your ABD
* consumer with a scattered ABD.
*/
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
if (is_metadata) {
ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
} else {
ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
if (abd_is_linear_page(abd)) {
abd_free_linear_page(abd);
return;
}
if (abd->abd_flags & ABD_FLAG_META) {
zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
} else {
zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
}
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
static void
abd_free_gang(abd_t *abd)
{
ASSERT(abd_is_gang(abd));
abd_t *cabd;
while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
/*
* We must acquire the child ABD's mutex to ensure that if it
* is being added to another gang ABD we will set the link
* as inactive when removing it from this gang ABD and before
* adding it to the other gang ABD.
*/
mutex_enter(&cabd->abd_mtx);
ASSERT(list_link_active(&cabd->abd_gang_link));
list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
mutex_exit(&cabd->abd_mtx);
if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
abd_free(cabd);
}
list_destroy(&ABD_GANG(abd).abd_gang_chain);
}
static void
abd_free_scatter(abd_t *abd)
{
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
* and abd_get_*(), including abd_get_offset_struct().
*
* If the ABD was created with abd_alloc_*(), the underlying data
* (scatterlist or linear buffer) will also be freed. (Subject to ownership
* changes via abd_*_ownership_of_buf().)
*
* Unless the ABD was created with abd_get_offset_struct(), the abd_t will
* also be freed.
*/
void
abd_free(abd_t *abd)
{
if (abd == NULL)
return;
abd_verify(abd);
#ifdef ZFS_DEBUG
IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif
if (abd_is_gang(abd)) {
abd_free_gang(abd);
} else if (abd_is_linear(abd)) {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_linear(abd);
} else {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_scatter(abd);
}
#ifdef ZFS_DEBUG
if (abd->abd_parent != NULL) {
(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
abd->abd_size, abd);
}
#endif
abd_fini_struct(abd);
if (abd->abd_flags & ABD_FLAG_ALLOCD)
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD of the same format (same metadata flag, same scatterize
* setting) as another ABD.
*/
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
if (abd_is_linear(sabd) &&
!abd_is_linear_page(sabd)) {
return (abd_alloc_linear(size, is_metadata));
} else {
return (abd_alloc(size, is_metadata));
}
}
/*
* Create a gang ABD that will be the head of a list of ABDs. This is used
* to "chain" scatter/gather lists together when constructing aggregated
* IOs. To free this ABD, abd_free() must be called.
*/
abd_t *
abd_alloc_gang(void)
{
abd_t *abd = abd_alloc_struct(0);
abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
return (abd);
}
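/*
* A brief sketch of gang ABD construction, assuming two existing
* ABDs a and b that the caller will free itself (free_on_free is
* therefore B_FALSE):
*
* abd_t *gabd = abd_alloc_gang();
* abd_gang_add(gabd, a, B_FALSE);
* abd_gang_add(gabd, b, B_FALSE);
* ... use gabd as one ABD of size a->abd_size + b->abd_size ...
* abd_free(gabd);
*/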
/*
* Add a child gang ABD to a parent gang ABDs chained list.
*/
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
ASSERT(abd_is_gang(cabd));
if (free_on_free) {
/*
* If the parent is responsible for freeing the child gang
* ABD we will just splice the child's children ABD list to
* the parent's list and immediately free the child gang ABD
* struct. The children moved from the child gang to the
* parent will retain all their free_on_free settings after
* being added to the parent's list.
*/
#ifdef ZFS_DEBUG
/*
* If cabd had abd_parent, we have to drop it here. We can't
* transfer it to pabd, nor we can clear abd_size leaving it.
*/
if (cabd->abd_parent != NULL) {
(void) zfs_refcount_remove_many(
&cabd->abd_parent->abd_children,
cabd->abd_size, cabd);
cabd->abd_parent = NULL;
}
#endif
pabd->abd_size += cabd->abd_size;
cabd->abd_size = 0;
list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
&ABD_GANG(cabd).abd_gang_chain);
ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
abd_verify(pabd);
abd_free(cabd);
} else {
for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
child != NULL;
child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
/*
* We always pass B_FALSE for free_on_free as it is the
* original child gang ABD's responsibility to determine
* if any of its child ABDs should be freed on the call
* to abd_free().
*/
abd_gang_add(pabd, child, B_FALSE);
}
abd_verify(pabd);
}
}
/*
* Add a child ABD to a gang ABD's chained list.
*/
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
abd_t *child_abd = NULL;
/*
* If the child being added is a gang ABD, we will add the
* child's ABDs to the parent gang ABD. This allows us to account
* for the offset correctly in the parent gang ABD.
*/
if (abd_is_gang(cabd)) {
ASSERT(!list_link_active(&cabd->abd_gang_link));
return (abd_gang_add_gang(pabd, cabd, free_on_free));
}
ASSERT(!abd_is_gang(cabd));
/*
* In order to verify that an ABD is not already part of
* another gang ABD, we must lock the child ABD's abd_mtx
* to check its abd_gang_link status. We unlock the abd_mtx
* only after it has been added to a gang ABD, which
* will update the abd_gang_link's status. See comment below
* for how an ABD can be in multiple gang ABDs simultaneously.
*/
mutex_enter(&cabd->abd_mtx);
if (list_link_active(&cabd->abd_gang_link)) {
/*
* If the child ABD is already part of another
* gang ABD then we must allocate a new
* ABD to use a separate link. We mark the newly
* allocated ABD with ABD_FLAG_GANG_FREE, before
* adding it to the gang ABD's list, to make the
* gang ABD aware that it is responsible to call
* abd_free(). We use abd_get_offset() in order
* to just allocate a new ABD but avoid copying the
* data over into the newly allocated ABD.
*
* An ABD may become part of multiple gang ABDs. For
* example, when writing ditto blocks, the same ABD
* is used to write 2 or 3 locations with 2 or 3
* zio_t's. Each of the zio's may be aggregated with
* different adjacent zio's. zio aggregation uses gang
* zio's, so the single ABD can become part of multiple
* gang zio's.
*
* The ASSERT below is to make sure that if
* free_on_free is passed as B_TRUE, the ABD cannot
* be in multiple gang ABDs. The gang ABD cannot
* be responsible for cleaning up the child
* ABD memory allocation if the ABD can be in
* multiple gang ABDs at one time.
*/
ASSERT3B(free_on_free, ==, B_FALSE);
child_abd = abd_get_offset(cabd, 0);
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
} else {
child_abd = cabd;
if (free_on_free)
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
}
ASSERT3P(child_abd, !=, NULL);
list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
mutex_exit(&cabd->abd_mtx);
pabd->abd_size += child_abd->abd_size;
}
/*
* Locate the ABD for the supplied offset in the gang ABD.
* Return a new offset relative to the returned ABD.
*/
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
abd_t *cabd;
ASSERT(abd_is_gang(abd));
ASSERT3U(*off, <, abd->abd_size);
for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
if (*off >= cabd->abd_size)
*off -= cabd->abd_size;
else
return (cabd);
}
VERIFY3P(cabd, !=, NULL);
return (cabd);
}
/*
* Allocate a new ABD, using the provided struct (if non-NULL, and if
* circumstances allow - otherwise allocate the struct). The returned ABD will
* point to offset off of sabd. It shares the underlying buffer data with sabd.
* Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
*/
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_verify(sabd);
ASSERT3U(off + size, <=, sabd->abd_size);
if (abd_is_linear(sabd)) {
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
} else if (abd_is_gang(sabd)) {
size_t left = size;
if (abd == NULL) {
abd = abd_alloc_gang();
} else {
abd->abd_flags |= ABD_FLAG_GANG;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
}
abd->abd_flags &= ~ABD_FLAG_OWNER;
for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
cabd != NULL && left > 0;
cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
int csize = MIN(left, cabd->abd_size - off);
abd_t *nabd = abd_get_offset_size(cabd, off, csize);
abd_gang_add(abd, nabd, B_TRUE);
left -= csize;
off = 0;
}
ASSERT3U(left, ==, 0);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
ASSERT3P(abd, !=, NULL);
abd->abd_size = size;
#ifdef ZFS_DEBUG
abd->abd_parent = sabd;
(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
return (abd);
}
/*
* Like abd_get_offset_size(), but memory for the abd_t is provided by the
* caller. Using this routine can improve performance by avoiding the cost
* of allocating memory for the abd_t struct, and updating the abd stats.
* Usually, the provided abd is returned, but in some circumstances (FreeBSD,
* if sabd is scatter and size is more than 2 pages) a new abd_t may need to
* be allocated. Therefore callers should be careful to use the returned
* abd_t*.
*/
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_t *result;
abd_init_struct(abd);
result = abd_get_offset_impl(abd, sabd, off, size);
if (result != abd)
abd_fini_struct(abd);
return (result);
}
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
VERIFY3U(size, >, 0);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
ASSERT3U(off + size, <=, sabd->abd_size);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
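/*
* For illustration: given a 128K ABD sabd,
* abd_get_offset_size(sabd, 512, 4096) returns a 4K view whose data
* starts 512 bytes into sabd. No data is copied; the view shares
* sabd's buffers, so sabd must not be freed while the view exists,
* and the view itself is released with abd_free().
*/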
/*
* Return a scatter ABD of the given size containing only zeros.
*/
abd_t *
abd_get_zeros(size_t size)
{
ASSERT3P(abd_zero_scatter, !=, NULL);
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
return (abd_get_offset_size(abd_zero_scatter, 0, size));
}
/*
* Allocate a linear ABD structure for buf.
*/
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
/*
* Even if this buf is filesystem metadata, we only track that if we
* own the underlying data buffer, which is not true in this case.
* Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_size = size;
ABD_LINEAR_BUF(abd) = buf;
return (abd);
}
/*
* Get the raw buffer associated with a linear ABD.
*/
void *
abd_to_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
abd_verify(abd);
return (ABD_LINEAR_BUF(abd));
}
/*
* Borrow a raw buffer from an ABD without copying the contents of the ABD
* into the buffer. If the ABD is scattered, this will allocate a raw buffer
* whose contents are undefined. To copy over the existing data in the ABD, use
* abd_borrow_buf_copy() instead.
*/
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
void *buf;
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
buf = abd_to_buf(abd);
} else {
buf = zio_buf_alloc(n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
return (buf);
}
void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
void *buf = abd_borrow_buf(abd, n);
if (!abd_is_linear(abd)) {
abd_copy_to_buf(buf, abd, n);
}
return (buf);
}
/*
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
* not change the contents of the ABD and will ASSERT that you didn't modify
* the buffer since it was borrowed. If you want any changes you made to buf to
* be copied back to abd, use abd_return_buf_copy() instead.
*/
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
#ifdef ZFS_DEBUG
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
if (abd_is_linear(abd)) {
ASSERT3P(buf, ==, abd_to_buf(abd));
} else {
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
}
void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
if (!abd_is_linear(abd)) {
abd_copy_from_buf(abd, buf, n);
}
abd_return_buf(abd, buf, n);
}
void
abd_release_ownership_of_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
/*
* abd_free() needs to handle LINEAR_PAGE ABD's specially.
* Since that flag does not survive the
* abd_release_ownership_of_buf() -> abd_get_from_buf() ->
* abd_take_ownership_of_buf() sequence, we don't allow releasing
* these "linear but not zio_[data_]buf_alloc()'ed" ABD's.
*/
ASSERT(!abd_is_linear_page(abd));
abd_verify(abd);
abd->abd_flags &= ~ABD_FLAG_OWNER;
/* Disable this flag since we no longer own the data buffer */
abd->abd_flags &= ~ABD_FLAG_META;
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
/*
* Give this ABD ownership of the buffer that it's storing. Can only be used on
* linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
* with abd_alloc_linear() which subsequently released ownership of their buf
* with abd_release_ownership_of_buf().
*/
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
ASSERT(abd_is_linear(abd));
ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
abd_verify(abd);
abd->abd_flags |= ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
}
/*
* Initializes an abd_iter based on whether the abd is a gang ABD
* or just a single ABD.
*/
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
abd_t *cabd = NULL;
if (abd_is_gang(abd)) {
cabd = abd_gang_get_offset(abd, &off);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, off);
}
} else {
abd_iter_init(aiter, abd);
abd_iter_advance(aiter, off);
}
return (cabd);
}
/*
* Advances an abd_iter. We have to be careful with gang ABD as
* advancing could mean that we are at the end of a particular ABD and
* must grab the next ABD in the gang ABD's list.
*/
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
size_t len)
{
abd_iter_advance(aiter, len);
if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
ASSERT3P(cabd, !=, NULL);
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, 0);
}
}
return (cabd);
}
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
abd_iter_func_t *func, void *private)
{
struct abd_iter aiter;
int ret = 0;
if (size == 0)
return (0);
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
- boolean_t gang = abd_is_gang(abd);
abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
- /* If we are at the end of the gang ABD we are done */
- if (gang && !c_abd)
- break;
+ IMPLY(abd_is_gang(abd), c_abd != NULL);
abd_iter_map(&aiter);
size_t len = MIN(aiter.iter_mapsize, size);
ASSERT3U(len, >, 0);
ret = func(aiter.iter_mapaddr, len, private);
abd_iter_unmap(&aiter);
if (ret != 0)
break;
size -= len;
c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
}
struct buf_arg {
void *arg_buf;
};
static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(ba_ptr->arg_buf, buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy abd to buf. (off is the offset in abd.)
*/
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
struct buf_arg ba_ptr = { buf };
(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
&ba_ptr);
}
static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
int ret;
struct buf_arg *ba_ptr = private;
ret = memcmp(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (ret);
}
/*
* Compare the contents of abd to buf. (off is the offset in abd.)
*/
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}
static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy from buf to abd. (off is the offset in abd.)
*/
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
&ba_ptr);
}
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
(void) private;
(void) memset(buf, 0, size);
return (0);
}
/*
* Zero out the abd from a particular offset to the end.
*/
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}
/*
* Iterate over two ABDs and call func incrementally on the two ABDs' data in
* equal-sized chunks (passed to func as raw buffers). func could be called many
* times during this iteration.
*/
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
size_t size, abd_iter_func2_t *func, void *private)
{
int ret = 0;
struct abd_iter daiter, saiter;
- boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
abd_t *c_dabd, *c_sabd;
if (size == 0)
return (0);
abd_verify(dabd);
abd_verify(sabd);
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
- dabd_is_gang_abd = abd_is_gang(dabd);
- sabd_is_gang_abd = abd_is_gang(sabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
- /* if we are at the end of the gang ABD we are done */
- if ((dabd_is_gang_abd && !c_dabd) ||
- (sabd_is_gang_abd && !c_sabd))
- break;
+ IMPLY(abd_is_gang(dabd), c_dabd != NULL);
+ IMPLY(abd_is_gang(sabd), c_sabd != NULL);
abd_iter_map(&daiter);
abd_iter_map(&saiter);
size_t dlen = MIN(daiter.iter_mapsize, size);
size_t slen = MIN(saiter.iter_mapsize, size);
size_t len = MIN(dlen, slen);
ASSERT(dlen > 0 || slen > 0);
ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
private);
abd_iter_unmap(&saiter);
abd_iter_unmap(&daiter);
if (ret != 0)
break;
size -= len;
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
c_sabd =
abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
}
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) private;
(void) memcpy(dbuf, sbuf, size);
return (0);
}
/*
* Copy from sabd to dabd starting from soff and doff.
*/
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
abd_copy_off_cb, NULL);
}
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
(void) private;
return (memcmp(bufa, bufb, size));
}
/*
* Compares the contents of two ABDs.
*/
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
abd_cmp_cb, NULL));
}
/*
* Iterate over code ABDs and a data ABD and call @func_raidz_gen.
*
* @cabds parity ABDs, must have equal size
* @dabd data ABD. Can be NULL (in this case @dsize = 0)
* @func_raidz_gen should be implemented so that it behaves
* the same whether it is given linear or scatter buffers
*/
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
int i;
ssize_t len, dlen;
struct abd_iter caiters[3];
- struct abd_iter daiter = {0};
+ struct abd_iter daiter;
void *caddrs[3];
unsigned long flags __maybe_unused = 0;
abd_t *c_cabds[3];
abd_t *c_dabd = NULL;
- boolean_t cabds_is_gang_abd[3];
- boolean_t dabd_is_gang_abd = B_FALSE;
ASSERT3U(parity, <=, 3);
-
for (i = 0; i < parity; i++) {
- cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
+ abd_verify(cabds[i]);
+ ASSERT3U(csize, <=, cabds[i]->abd_size);
c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
}
- if (dabd) {
- dabd_is_gang_abd = abd_is_gang(dabd);
+ ASSERT3S(dsize, >=, 0);
+ if (dsize > 0) {
+ ASSERT(dabd);
+ abd_verify(dabd);
+ ASSERT3U(dsize, <=, dabd->abd_size);
c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
}
- ASSERT3S(dsize, >=, 0);
-
abd_enter_critical(flags);
while (csize > 0) {
- /* if we are at the end of the gang ABD we are done */
- if (dabd_is_gang_abd && !c_dabd)
- break;
-
+ len = csize;
for (i = 0; i < parity; i++) {
- /*
- * If we are at the end of the gang ABD we are
- * done.
- */
- if (cabds_is_gang_abd[i] && !c_cabds[i])
- break;
+ IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
abd_iter_map(&caiters[i]);
caddrs[i] = caiters[i].iter_mapaddr;
+ len = MIN(caiters[i].iter_mapsize, len);
}
- len = csize;
-
- if (dabd && dsize > 0)
+ if (dsize > 0) {
+ IMPLY(abd_is_gang(dabd), c_dabd != NULL);
abd_iter_map(&daiter);
-
- switch (parity) {
- case 3:
- len = MIN(caiters[2].iter_mapsize, len);
- zfs_fallthrough;
- case 2:
- len = MIN(caiters[1].iter_mapsize, len);
- zfs_fallthrough;
- case 1:
- len = MIN(caiters[0].iter_mapsize, len);
- }
-
- /* must be progressive */
- ASSERT3S(len, >, 0);
-
- if (dabd && dsize > 0) {
- /* this needs precise iter.length */
len = MIN(daiter.iter_mapsize, len);
dlen = len;
} else
dlen = 0;
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if any
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&caiters[i]);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&caiters[i], len);
}
- if (dabd && dsize > 0) {
+ if (dsize > 0) {
abd_iter_unmap(&daiter);
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter,
dlen);
dsize -= dlen;
}
csize -= len;
ASSERT3S(dsize, >=, 0);
ASSERT3S(csize, >=, 0);
}
abd_exit_critical(flags);
}
/*
* Iterate over code ABDs and data reconstruction target ABDs and call
* @func_raidz_rec. Function maps at most 6 pages atomically.
*
* @cabds parity ABDs, must have equal size
* @tabds rec target ABDs, at most 3
* @tsize size of data target columns
* @func_raidz_rec expects syndrome data in target columns. Function
* reconstructs data and overwrites target columns.
*/
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul)
{
int i;
ssize_t len;
struct abd_iter citers[3];
struct abd_iter xiters[3];
void *caddrs[3], *xaddrs[3];
unsigned long flags __maybe_unused = 0;
- boolean_t cabds_is_gang_abd[3];
- boolean_t tabds_is_gang_abd[3];
abd_t *c_cabds[3];
abd_t *c_tabds[3];
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
- cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
- tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
+ abd_verify(cabds[i]);
+ abd_verify(tabds[i]);
+ ASSERT3U(tsize, <=, cabds[i]->abd_size);
+ ASSERT3U(tsize, <=, tabds[i]->abd_size);
c_cabds[i] =
abd_init_abd_iter(cabds[i], &citers[i], 0);
c_tabds[i] =
abd_init_abd_iter(tabds[i], &xiters[i], 0);
}
abd_enter_critical(flags);
while (tsize > 0) {
-
+ len = tsize;
for (i = 0; i < parity; i++) {
- /*
- * If we are at the end of the gang ABD we
- * are done.
- */
- if (cabds_is_gang_abd[i] && !c_cabds[i])
- break;
- if (tabds_is_gang_abd[i] && !c_tabds[i])
- break;
+ IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
+ IMPLY(abd_is_gang(tabds[i]), c_tabds[i] != NULL);
abd_iter_map(&citers[i]);
abd_iter_map(&xiters[i]);
caddrs[i] = citers[i].iter_mapaddr;
xaddrs[i] = xiters[i].iter_mapaddr;
+ len = MIN(citers[i].iter_mapsize, len);
+ len = MIN(xiters[i].iter_mapsize, len);
}
- len = tsize;
- switch (parity) {
- case 3:
- len = MIN(xiters[2].iter_mapsize, len);
- len = MIN(citers[2].iter_mapsize, len);
- zfs_fallthrough;
- case 2:
- len = MIN(xiters[1].iter_mapsize, len);
- len = MIN(citers[1].iter_mapsize, len);
- zfs_fallthrough;
- case 1:
- len = MIN(xiters[0].iter_mapsize, len);
- len = MIN(citers[0].iter_mapsize, len);
- }
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if any
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_rec(xaddrs, len, caddrs, mul);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&xiters[i]);
abd_iter_unmap(&citers[i]);
c_tabds[i] =
abd_advance_abd_iter(tabds[i], c_tabds[i],
&xiters[i], len);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&citers[i], len);
}
tsize -= len;
ASSERT3S(tsize, >=, 0);
}
abd_exit_critical(flags);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index dfea15b74394..4db6c06148b1 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,10778 +1,10756 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* metadata limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed. For example, when using the ZPL each dentry
* holds a reference on a znode. These dentries must be pruned before
* the arc buffer holding the znode can be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
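/*
* A sketch (not a verbatim excerpt) of the pattern the rules above
* imply for eviction, which holds an ARC list lock and therefore
* must not block on a hash table lock:
*
* if (!mutex_tryenter(hash_lock)) {
* ARCSTAT_BUMP(arcstat_mutex_miss);
* continue; (skip this buffer rather than risk deadlock)
* }
*/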
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. The data
* of a block in this state cannot be accessed directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list; however, a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled, the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data, the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
static clock_t arc_last_uncached_flush;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
static uint_t zfs_arc_eviction_pct = 200;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
static uint_t zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
uint_t arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
static const int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
static int zfs_arc_overflow_shift = 8;
/* log2(fraction of arc to reclaim) */
uint_t arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e., if there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
uint_t arc_no_grow_shift = 5;
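/*
* For example, with arc_c = 4 GiB the default arc_no_grow_shift of 5
* requires 4 GiB >> 5 = 128 MiB of free memory before the ARC may
* grow, while arc_shrink_shift = 7 shrinks it in steps of
* 4 GiB >> 7 = 32 MiB; keeping 5 < 7 preserves the invariant
* described above.
*/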
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static uint_t arc_min_prefetch_ms;
static uint_t arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
uint_t arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
uint64_t zfs_arc_max = 0;
uint64_t zfs_arc_min = 0;
static uint64_t zfs_arc_dnode_limit = 0;
static uint_t zfs_arc_dnode_reduce_percent = 10;
static uint_t zfs_arc_grow_retry = 0;
static uint_t zfs_arc_shrink_shift = 0;
uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle:
* * total dirty data limit
* * anon block dirty limit
* * each pool's anon allowance
*/
static const unsigned long zfs_arc_dirty_limit_percent = 50;
static const unsigned long zfs_arc_anon_limit_percent = 25;
static const unsigned long zfs_arc_pool_dirty_percent = 20;
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* Balance between metadata and data on ghost hits. Values above 100
* increase metadata caching by proportionally reducing effect of ghost
* data hits on target data/metadata rate.
*/
static uint_t zfs_arc_meta_balance = 500;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
static uint_t zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux-specific
*/
static uint64_t zfs_arc_sys_free = 0;
static uint_t zfs_arc_min_prefetch_ms = 0;
static uint_t zfs_arc_min_prescient_prefetch_ms = 0;
static uint_t zfs_arc_lotsfree_percent = 10;
/*
* Number of arc_prune threads
*/
static int zfs_arc_prune_task_threads = 1;
/* The 7 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_state_t ARC_uncached;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "iohits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_iohits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_iohits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "uncached_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "meta", KSTAT_DATA_UINT64 },
{ "pd", KSTAT_DATA_UINT64 },
{ "pm", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_data", KSTAT_DATA_UINT64 },
{ "anon_metadata", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_data", KSTAT_DATA_UINT64 },
{ "mru_metadata", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_data", KSTAT_DATA_UINT64 },
{ "mfu_metadata", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "uncached_size", KSTAT_DATA_UINT64 },
{ "uncached_data", KSTAT_DATA_UINT64 },
{ "uncached_metadata", KSTAT_DATA_UINT64 },
{ "uncached_evictable_data", KSTAT_DATA_UINT64 },
{ "uncached_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
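/*
 * For example, buf_hash_insert() below calls
 * ARCSTAT_MAX(arcstat_hash_chain_max, i) to raise the recorded maximum
 * chain length. The atomic_cas_64() loop retries only while another
 * thread races in with a different value, so concurrent updaters can
 * never lower the maximum.
 */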
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
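/*
 * Illustrative (non-compiled) expansion with hypothetical condition
 * variables: following the arcstat naming scheme, this bumps one of
 * arcstat_demand_metadata_hits, arcstat_demand_data_hits,
 * arcstat_prefetch_metadata_hits or arcstat_prefetch_data_hits.
 */
#if 0
	ARCSTAT_CONDSTAT(is_demand, demand, prefetch,
	    is_metadata, metadata, data, hits);
#endif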
/*
* This macro allows us to use kstats as floating averages. Each time we
 * update this kstat, we first scale it and the update value by
 * ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
} while (0)
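/*
 * Worked example: with ARCSTAT_F_AVG_FACTOR == 3, an existing average of
 * 900 updated with a new sample of 300 becomes 900 - 900/3 + 300/3 = 700;
 * each update replaces one third of the old average, yielding an
 * exponentially decaying moving average.
 */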
static kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_UNCACHED(hdr) ((hdr)->b_flags & ARC_FLAG_UNCACHED)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp))
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
 * If we discover any compressed buffers during an ARC scan, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
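/*
 * Per the comment above: with the default headroom of L2ARC_HEADROOM (2)
 * device-write sizes, discovering compressed buffers scales the next
 * cycle's scan depth to headroom * L2ARC_HEADROOM_BOOST / 100, i.e. 2x.
 */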
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
 * and each of these states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
uint64_t l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
static uint_t l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_USE_RESERVE = 0x4,
ARC_HDR_ALLOC_LINEAR = 0x8,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, const void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, const void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, const void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, const void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, const void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
const void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_hdr_destroy(arc_buf_hdr_t *);
static void arc_access(arc_buf_hdr_t *, arc_flags_t, boolean_t);
static void arc_buf_watch(arc_buf_t *);
static void arc_change_state(arc_state_t *, arc_buf_hdr_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
static void arc_prune_async(uint64_t adjust);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
 * l2arc_exclude_special : A ZFS module parameter that controls whether buffers
 * present on special vdevs are eligible for caching in L2ARC. If
* set to 1, exclude dbufs on special vdevs from being cached to
* L2ARC.
*/
int l2arc_exclude_special = 0;
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
static int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
 * will vary depending on how well the specific device handles
* these commands.
*/
static uint64_t l2arc_trim_ahead = 0;
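/*
 * Sketch of the arithmetic described above (illustrative only): with
 * l2arc_trim_ahead == 100, roughly
 * write_size * (100 + l2arc_trim_ahead) / 100 == 2 * write_size
 * bytes are trimmed ahead of upcoming writes, subject to the 64MB floor.
 */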
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
static int l2arc_rebuild_enabled = B_TRUE;
static uint64_t l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static __attribute__((noreturn)) void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static uint64_t l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
 (((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa))
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
 * equal to hdr in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
 * should be using vmem_free() in the Linux kernel.
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
#endif
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
memset(buf, 0, sizeof (arc_buf_t));
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
static void
hdr_full_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
#endif
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
static void
buf_dest(void *vbuf, void *unused)
{
(void) unused;
(void) vbuf;
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
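 /*
 * Worked example: with 4GB of physical memory and the default 8K
 * average block size, hsize doubles from 4096 up to 512K entries,
 * i.e. a 4MB table of 8-byte pointers.
 */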
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
 * should be using vmem_alloc() in the Linux kernel.
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
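/*
 * A minimal sketch (not part of this file) of how a reflected,
 * table-driven CRC-64 typically consumes zfs_crc64_table, one byte at
 * a time:
 */
#if 0
static uint64_t
crc64_byte(uint64_t crc, uint8_t byte)
{
	/* Right-shifting step using the table initialized in buf_init(). */
	return ((crc >> 8) ^ zfs_crc64_table[(crc ^ byte) & 0xFF]);
}
#endif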
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
EQUIV(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
(void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#else
(void) buf;
#endif
}
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#else
(void) buf;
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in
* thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
 * Holes and embedded blocks will always have a psize = 0, so we
 * ignore the compression of the blkptr and mark them as
 * uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
#ifdef ZFS_DEBUG
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
#endif
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, &tmpbuf, lsize, hdr->b_complevel);
ASSERT3P(tmpbuf, !=, NULL);
ASSERT3U(csize, <=, psize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, 0);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, 0);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
 * abd plaintext pointer for use. This involves authenticating protected
 * data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !arc_buf_is_shared(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (ARC_BUF_SHARED(buf)) {
ASSERT(arc_buf_is_shared(buf));
} else {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (ARC_BUF_SHARED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(!arc_buf_is_shared(buf));
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb, &buf->b_hdr->b_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
state != arc_anon && state != arc_l2c_only) {
/* We don't use the L2-only state list. */
multilist_remove(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_decrement(hdr, state);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, const void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(!GHOST_STATE(state)); /* arc_l2c_only counts as a ghost. */
if ((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) != 0)
return (cnt);
if (state == arc_anon) {
arc_hdr_destroy(hdr);
return (0);
}
if (state == arc_uncached && !HDR_PREFETCH(hdr)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
return (0);
}
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_increment(hdr, state);
return (0);
}
/*
* Returns detailed information about a specific arc buffer. When the
 * state_index argument is set, the function will calculate the arc header
 * list position for its arc state. Since this requires a linear traversal,
 * callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
(void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = 0;
for (arc_buf_t *buf = l1hdr->b_buf; buf; buf = buf->b_next)
abi->abi_bufcnt++;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
{
arc_state_t *old_state;
int64_t refcnt;
boolean_t update_old, update_new;
arc_buf_contents_t type = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
update_old = (hdr->b_l1hdr.b_buf != NULL ||
hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(GHOST_STATE(old_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(GHOST_STATE(new_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(old_state == arc_anon, hdr->b_l1hdr.b_buf == NULL ||
ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
} else {
old_state = arc_l2c_only;
refcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
if (GHOST_STATE(old_state))
update_old = B_TRUE;
if (GHOST_STATE(new_state))
update_new = B_TRUE;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(new_state, !=, old_state);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
/* remove_reference() saves on insert. */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
multilist_remove(&old_state->arcs_list[type],
hdr);
arc_evictable_space_decrement(hdr, old_state);
}
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[type], hdr);
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have no arc
* buffer to use for the reference. As a result, we
* use the arc header pointer for the reference.
*/
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_buf_size(buf), buf);
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
} else {
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (ARC_BUF_SHARED(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_buf_size(buf), buf);
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABDs, not
* just those allocated by the ARC. But the vast majority of
* scatter ABDs come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, -space);
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
const void *tag, boolean_t encrypted, boolean_t compressed,
boolean_t noauth, boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static const char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
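/*
 * Editor's sketch (not part of the original source): a typical loan/return
 * cycle as a consumer such as the DMU might drive it. "my_tag" and "src"
 * are hypothetical names:
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, B_FALSE, size);
 *	memcpy(buf->b_data, src, size);	  // fill while "on loan"
 *	arc_return_buf(buf, my_tag);	  // counted as in-flight data again
 */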
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list, update lastbuf and advance bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (ARC_BUF_SHARED(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
ASSERT(!arc_buf_is_shared(buf));
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
/*
* If we have no more encrypted buffers and we've already
* gotten a copy of the decrypted data we can free b_rabd
* to save some space.
*/
if (ARC_BUF_ENCRYPTED(buf) && HDR_HAS_RABD(hdr) &&
hdr->b_l1hdr.b_pabd != NULL && !HDR_IO_IN_PROGRESS(hdr)) {
arc_buf_t *b;
for (b = hdr->b_l1hdr.b_buf; b; b = b->b_next) {
if (b != buf && ARC_BUF_ENCRYPTED(b))
break;
}
if (b == NULL)
arc_hdr_free_abd(hdr, B_TRUE);
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
ASSERT(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
ASSERT(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
/*
* Allocate an empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In the case of a read, arc_read() assigns the header its identity
* (b_dva + b_birth), inserts it into the ARC hash to become globally
* visible, and allocates a physical (b_pabd) or raw (b_rabd) ABD buffer
* to read into from disk. On disk read completion arc_read_done()
* allocates ARC buffer(s) as needed, potentially sharing one of them with
* the physical ABD buffer.
*
* In the case of a write, arc_alloc_buf() allocates an ARC buffer to be
* filled with data. Then, after compression and/or encryption,
* arc_write_ready() allocates and fills (or potentially shares) the
* physical (b_pabd) or raw (b_rabd) ABD buffer. On disk write completion
* arc_write_done() assigns the header its new identity (b_dva + b_birth)
* and inserts it into the ARC hash.
*
* In the case of a partial overwrite, the old data is read first as
* described above. Then arc_release() either allocates a new anonymous ARC
* header and moves the ARC buffer to it, or reuses the old ARC header by
* discarding its identity and removing it from the ARC hash. After the
* buffer is modified, the normal write process follows as described above.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
ASSERT(HDR_EMPTY(hdr));
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
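/*
 * Editor's sketch of the read path described above (simplified, not part
 * of the original source):
 *
 *	hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_FALSE,
 *	    ZIO_COMPRESS_OFF, 0, ARC_BUFC_DATA);  // anonymous, no identity
 *	// arc_read(): assign b_dva/b_birth, insert into the hash table,
 *	// allocate b_pabd and issue the disk I/O into it.
 *	// arc_read_done(): arc_buf_alloc_impl() attaches reader buffers,
 *	// possibly sharing one of them with b_pabd.
 */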
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify that previous threads set these to NULL before freeing. */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(); as such, we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, const void *tag, arc_buf_contents_t type,
int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
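/*
 * Editor's sketch (not part of the original source): allocate, fill and
 * free an anonymous buffer. "tag" is whatever pointer the caller uses for
 * refcount tracking (typically the owning structure); "src" is a
 * hypothetical data source.
 *
 *	arc_buf_t *buf = arc_alloc_buf(spa, tag, ARC_BUFC_DATA, size);
 *	memcpy(buf->b_data, src, size);
 *	arc_buf_destroy(buf, tag);	// drops the reference and frees
 */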
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, const void *tag, uint64_t psize,
uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, const void *tag, uint64_t dsobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr)) {
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
arc_hdr_l2hdr_destroy(hdr);
}
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can no
* longer be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
void
arc_buf_destroy(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, tag));
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
arc_buf_destroy_impl(buf);
(void) remove_reference(hdr, tag);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
* - arc_uncached -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
uint_t min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
(void) arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu || state == arc_uncached);
evicted_state = (state == arc_uncached) ? arc_anon :
((state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost);
/* prefetch buffers have a minimum lifespan */
if ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime)) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed buffer then we
* discard it here before we change states. This ensures that the
* accounting is updated correctly in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
if (evicted_state == arc_anon) {
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
} else {
ASSERT(HDR_IN_HASH_TABLE(hdr));
}
return (bytes_evicted);
}
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint_t evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count == 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero is for the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, &revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header; don't increment
* evict_count in that case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
kpreempt(KPREEMPT_SYNC);
return (bytes_evicted);
}
/*
* Allocate an array of buffer headers used as placeholders during arc state
* eviction.
*/
static arc_buf_hdr_t **
arc_state_alloc_markers(int count)
{
arc_buf_hdr_t **markers;
markers = kmem_zalloc(sizeof (*markers) * count, KM_SLEEP);
for (int i = 0; i < count; i++) {
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
}
return (markers);
}
static void
arc_state_free_markers(arc_buf_hdr_t **markers, int count)
{
for (int i = 0; i < count; i++)
kmem_cache_free(hdr_full_cache, markers[i]);
kmem_free(markers, sizeof (*markers) * count);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state, which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, arc_buf_contents_t type, uint64_t spa,
uint64_t bytes)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
if (zthr_iscurthread(arc_evict_zthr)) {
markers = arc_state_evict_markers;
ASSERT3S(num_sublists, <=, arc_state_evict_marker_count);
} else {
markers = arc_state_alloc_markers(num_sublists);
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* Loop until we've hit our target number of bytes to evict, or
* until we've evicted all evictable buffers (when bytes is
* ARC_EVICT_ALL).
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
}
if (markers != arc_state_evict_markers)
arc_state_free_markers(markers, num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, type, spa, ARC_EVICT_ALL);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified. This
* function prevents us from trying to evict more from a state's list
* than is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, arc_buf_contents_t type, int64_t bytes)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, type, 0, delta));
}
return (0);
}
/*
* Adjust the specified fraction, taking into account the initial ghost
* state(s) size, the ghost hit bytes pushing the fraction up, the ghost
* hit bytes pushing it down, and a balance factor controlling the decrease
* rate (used to balance metadata vs. data).
*/
static uint64_t
arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down,
uint_t balance)
{
if (total < 8 || up + down == 0)
return (frac);
/*
* We should not have more ghost hits than ghost size, but they
* may get close. Restrict maximum adjustment in that case.
*/
if (up + down >= total / 4) {
uint64_t scale = (up + down) / (total / 8);
up /= scale;
down /= scale;
}
/* Get maximal dynamic range by choosing optimal shifts. */
int s = highbit64(total);
s = MIN(64 - s, 32);
uint64_t ofrac = (1ULL << 32) - frac;
if (frac >= 4 * ofrac)
up /= frac / (2 * ofrac + 1);
up = (up << s) / (total >> (32 - s));
if (ofrac >= 4 * frac)
down /= ofrac / (2 * frac + 1);
down = (down << s) / (total >> (32 - s));
down = down * 100 / balance;
return (frac + up - down);
}
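/*
 * Editor's worked example (not part of the original source), assuming
 * highbit64(2^30) == 31: with frac = 1<<31 (50% in 32-bit fixed point),
 * total = 1<<30, up = 1<<20, down = 0 and balance = 100, no scaling is
 * needed (up + down < total/4), s = MIN(64 - 31, 32) = 32, and up becomes
 * (2^20 << 32) / 2^30 = 2^22. The returned fraction is therefore
 * (1<<31) + (1<<22): it grows by up/total = 1/1024, about 0.1%.
 */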
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t asize, bytes, total_evicted = 0;
int64_t e, mrud, mrum, mfud, mfum, w;
static uint64_t ogrd, ogrm, ogfd, ogfm;
static uint64_t gsrd, gsrm, gsfd, gsfm;
uint64_t ngrd, ngrm, ngfd, ngfm;
/* Get current size of ARC states we can evict from. */
mrud = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]);
mrum = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
mfud = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
mfum = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
uint64_t d = mrud + mfud;
uint64_t m = mrum + mfum;
uint64_t t = d + m;
/* Get ARC ghost hits since last eviction. */
ngrd = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t grd = ngrd - ogrd;
ogrd = ngrd;
ngrm = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t grm = ngrm - ogrm;
ogrm = ngrm;
ngfd = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t gfd = ngfd - ogfd;
ogfd = ngfd;
ngfm = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t gfm = ngfm - ogfm;
ogfm = ngfm;
/* Adjust ARC states balance based on ghost hits. */
arc_meta = arc_evict_adj(arc_meta, gsrd + gsrm + gsfd + gsfm,
grm + gfm, grd + gfd, zfs_arc_meta_balance);
arc_pd = arc_evict_adj(arc_pd, gsrd + gsfd, grd, gfd, 100);
arc_pm = arc_evict_adj(arc_pm, gsrm + gsfm, grm, gfm, 100);
asize = aggsum_value(&arc_sums.arcstat_size);
int64_t wt = t - (asize - arc_c);
/*
* Try to reduce pinned dnodes if more than 3/4 of the wanted metadata
* target is not evictable, or if the dnode size exceeds arc_dnode_limit.
*/
int64_t prune = 0;
int64_t dn = wmsum_value(&arc_sums.arcstat_dnode_size);
w = wt * (int64_t)(arc_meta >> 16) >> 16;
if (zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]) -
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) -
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]) >
w * 3 / 4) {
prune = dn / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100;
} else if (dn > arc_dnode_limit) {
prune = (dn - arc_dnode_limit) / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100;
}
if (prune > 0)
arc_prune_async(prune);
/* Evict MRU metadata. */
w = wt * (int64_t)(arc_meta * arc_pm >> 48) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrum - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mrum -= bytes;
asize -= bytes;
/* Evict MFU metadata. */
w = wt * (int64_t)(arc_meta >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(m - w));
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mfum -= bytes;
asize -= bytes;
/* Evict MRU data. */
wt -= m - total_evicted;
w = wt * (int64_t)(arc_pd >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrud - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e);
total_evicted += bytes;
mrud -= bytes;
asize -= bytes;
/* Evict MFU data. */
e = asize - arc_c;
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_DATA, e);
mfud -= bytes;
total_evicted += bytes;
/*
* Evict ghost lists
*
* The size of each state's ghost list represents how much that state
* may grow by shrinking the other states. Were it to shrink the other
* states to zero (which is unlikely), its ghost size would equal the
* sum of the other three state sizes. But an excessive ghost size may
* result in false ghost hits (too far back) that may never result in
* real cache hits if several states are competing. So choose some
* arbitrary point of 1/2 of the other state sizes.
*/
gsrd = (mrum + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]) -
gsrd;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_DATA, e);
gsrm = (mrud + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsrm;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_METADATA, e);
gsfd = (mrud + mrum + mfum) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]) -
gsfd;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_DATA, e);
gsfm = (mrud + mrum + mfud) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsfm;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_METADATA, e);
return (total_evicted);
}
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == NULL);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_METADATA, retry);
}
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t c = arc_c;
if (c <= arc_c_min)
return;
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
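/*
 * For example, with arc_c = 10 GiB, arc_size = 8 GiB and
 * to_free = 1 GiB, the 2 GiB gap is added to to_free, so the new
 * target becomes 7 GiB (1 GiB below the current size) and the
 * eviction thread immediately has work to do.
 */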
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
if (asize < c)
to_free += c - asize;
arc_c = MAX((int64_t)c - to_free, (int64_t)arc_c_min);
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
#ifdef _KERNEL
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
if (arc_evict_needed)
return (B_TRUE);
/*
* If we have buffers in uncached state, evict them periodically.
*/
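/*
 * The check below fires at most every arc_min_prefetch_ms / 2;
 * assuming the common default of 1000 ms, uncached buffers are
 * flushed at most about every 500 ms.
 */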
return ((zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]) &&
ddi_get_lbolt() - arc_last_uncached_flush >
MSEC_TO_TICK(arc_min_prefetch_ms / 2)));
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Always try to evict from uncached state. */
arc_last_uncached_flush = ddi_get_lbolt();
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_DATA, B_FALSE);
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_METADATA, B_FALSE);
/* Evict from other states only if told to. */
if (arc_evict_needed)
evicted += arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called roughly once a minute (every 60th invocation of this
* callback) to reclaim unused zstd compression and decompression
* contexts. This is done here to avoid the need for an
* independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
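/*
 * The delay above converts milliseconds to ticks, rounding up; e.g.
 * with hz = 1000 and an assumed arc_kmem_cache_reap_retry_ms of
 * 1000, it sleeps for 1000 ticks (one second).
 */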
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
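/*
 * Worked example, assuming the default arc_shrink_shift of 7: with
 * arc_c = 8 GiB and arc_c_min = 1 GiB, can_free is 7 GiB and the
 * fractional amount is 7 GiB >> 7 = 56 MiB. A 100 MiB deficit
 * (free_memory = -100 MiB) then yields
 * to_free = 56 MiB + 100 MiB = 156 MiB.
 */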
free_memory = arc_available_memory();
int64_t can_free = arc_c - arc_c_min;
if (can_free > 0) {
int64_t to_free = (can_free >> arc_shrink_shift) - free_memory;
if (to_free > 0)
arc_reduce_target_size(to_free);
}
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal to arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
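/*
 * A short example of case 2.1: assuming arc_c_min = 4 GiB,
 * arc_size = 5 GiB and 1 GiB of dirty data, then although 4 GiB of
 * the cache is clean, only 1 GiB (arc_size - arc_c_min) may be
 * reported as evictable.
 */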
#endif /* _KERNEL */
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(uint64_t bytes)
{
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
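/*
 * For example, assuming SPA_OLD_MAXBLOCKSIZE is 128 KiB: even a
 * 4 KiB allocation arriving while the cache is near its target
 * grows arc_c by 128 KiB, clamped to arc_c_max below.
 */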
if (aggsum_upper_bound(&arc_sums.arcstat_size) +
2 * SPA_MAXBLOCKSIZE >= arc_c) {
uint64_t dc = MAX(bytes, SPA_OLD_MAXBLOCKSIZE);
if (atomic_add_64_nv(&arc_c, dc) > arc_c_max)
arc_c = arc_c_max;
}
}
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t use_reserve)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
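/*
 * Worked example, assuming arc_c = 8 GiB and the default
 * zfs_arc_overflow_shift of 8: overflow = MAX(16 MiB, 32 MiB) =
 * 32 MiB. Without the reserve, sizes below arc_c + 16 MiB are
 * ARC_OVF_NONE, sizes up to arc_c + 32 MiB are ARC_OVF_SOME, and
 * anything larger is ARC_OVF_SEVERE.
 */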
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
if (!use_reserve)
overflow /= 2;
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (alloc_flags & ARC_HDR_ALLOC_LINEAR)
return (abd_alloc_linear(size, type == ARC_BUFC_METADATA));
else
return (abd_alloc(size, type == ARC_BUFC_METADATA));
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, 0);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
switch (arc_is_overflowing(use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is that we either call zthr_wakeup()
* an extra time due to a race with another thread here, or the
* flag we just set gets cleared by arc_evict_cb(), which is
* unlikely given the big hysteresis, and also unimportant since
* at this level of overflow the eviction is purely advisory.
* At the same time, taking the global lock here on every call,
* without waiting for the actual eviction, would create
* significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
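/*
 * For example, if arc_evict_count is currently 100 and the last
 * queued waiter is waiting for it to reach 150, a new request for
 * amount = 10 queues behind it with aew_count = 160.
 */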
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_adapt(size);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
* ensure that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
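/*
 * For example, assuming the default zfs_arc_eviction_pct of 200, a
 * 1 MiB allocation waits for up to 2 MiB to be evicted before
 * proceeding.
 */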
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
alloc_flags & ARC_HDR_USE_RESERVE);
arc_buf_contents_t type = arc_buf_type(hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size[type], size,
tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size,
const void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
*/
static void
arc_access(arc_buf_hdr_t *hdr, arc_flags_t arc_flags, boolean_t hit)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Update buffer prefetch status.
*/
boolean_t was_prefetch = HDR_PREFETCH(hdr);
boolean_t now_prefetch = arc_flags & ARC_FLAG_PREFETCH;
if (was_prefetch != now_prefetch) {
if (was_prefetch) {
ARCSTAT_CONDSTAT(hit, demand_hit, demand_iohit,
HDR_PRESCIENT_PREFETCH(hdr), prescient, predictive,
prefetch);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
if (was_prefetch) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (now_prefetch) {
if (arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
ARCSTAT_BUMP(arcstat_prescient_prefetch);
} else {
ARCSTAT_BUMP(arcstat_predictive_prefetch);
}
}
if (arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
clock_t now = ddi_get_lbolt();
if (hdr->b_l1hdr.b_state == arc_anon) {
arc_state_t *new_state;
/*
* This buffer is not in the cache, and does not appear in
* our "ghost" lists. Add it to the MRU or uncached state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = now;
if (HDR_UNCACHED(hdr)) {
new_state = arc_uncached;
DTRACE_PROBE1(new_state__uncached, arc_buf_hdr_t *,
hdr);
} else {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
/*
* This buffer has been accessed once recently and either
* its read is still in progress or it is in the cache.
*/
if (HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
/*
* If the previous access was a prefetch, then it already
* handled possible promotion, so nothing more to do for now.
*/
if (was_prefetch) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* If more than ARC_MINTIME have passed from the previous
* hit, promote the buffer to the MFU state.
*/
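/*
 * ARC_MINTIME is believed to be hz >> 4 (roughly 62 ms), so two
 * hits within that window leave the buffer in the MRU state.
 */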
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
}
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been accessed once recently, but was
* evicted from the cache. Had the MRU been bigger, this
* would have been an MRU hit, so handle it the same way,
* except we don't need to check the previous access time.
*/
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mru_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
if (was_prefetch) {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is either
* still in the cache or being restored from one of the ghost lists.
*/
if (!HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
}
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
/*
* This buffer has been accessed more than once recently, but
* has been evicted from the cache. Had the MFU been bigger,
* it would have stayed in the cache, so move it back to the MFU state.
*/
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mfu_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
} else if (hdr->b_l1hdr.b_state == arc_uncached) {
/*
* This buffer is uncacheable, but we got a hit. Probably
* a demand read after prefetch. Nothing more to do here.
*/
if (!HDR_IO_IN_PROGRESS(hdr))
ARCSTAT_BUMP(arcstat_uncached_hits);
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC and was not accessed
* for a long time, so treat it as new and put it into the MRU.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr))
return;
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, 0, B_TRUE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(B_TRUE /* demand */, demand, prefetch,
!HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zio, (void) zb, (void) bp;
if (buf == NULL)
return;
memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if it was freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (zio->io_error == 0) {
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp,
hdr->b_crypt_hdr.b_mac);
}
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
hdr->b_l1hdr.b_acb = NULL;
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
/* Track the last one, so callbacks below run in their original order. */
callback_list = acb;
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
(void) remove_reference(hdr, hdr);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_prev;
if (acb->acb_wait) {
mutex_enter(&acb->acb_wait_lock);
acb->acb_wait_error = zio->io_error;
acb->acb_wait = B_FALSE;
cv_signal(&acb->acb_wait_cv);
mutex_exit(&acb->acb_wait_lock);
/* acb will be freed by the waiting thread. */
} else {
kmem_free(acb, sizeof (arc_callback_t));
}
}
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache, pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
arc_buf_t *buf = NULL;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
/*
* Verify the block pointer contents are reasonable. This should
* always be the case since the blkptr is protected by a checksum.
* However, if there is damage it's desirable to detect this early
* and treat it as a checksum error. This allows an alternate blkptr
* to be tried when one is available (e.g. ditto blocks).
*/
if (!zfs_blkptr_verify(spa, bp, (zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
rc = SET_ERROR(ECKSUM);
goto done;
}
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read";
* they are backed by an anonymous arc buf created below, so
* skip the hash lookup for them.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
boolean_t is_data = !HDR_ISTYPE_METADATA(hdr);
if (HDR_IO_IN_PROGRESS(hdr)) {
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto done;
}
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
DTRACE_PROBE1(arc__iohit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_FALSE);
/*
* If there are multiple threads reading the same block
* and that block is not yet in the ARC, then only one
* thread will do the physical I/O and all other
* threads will wait until that I/O completes.
* Synchronous reads use the acb_wait_cv whereas nowait
* reads register a callback. Both are signalled/called
* in arc_read_done.
*
* Errors of the physical I/O may need to be propagated.
* Synchronous read errors are returned here from
* arc_read_done via acb_wait_error. Nowait reads
* attach the acb_zio_dummy zio to pio and
* arc_read_done propagates the physical I/O's io_error
* to acb_zio_dummy, and thereby to pio.
*/
arc_callback_t *acb = NULL;
if (done || pio || *arc_flags & ARC_FLAG_WAIT) {
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
if (*arc_flags & ARC_FLAG_WAIT) {
acb->acb_wait = B_TRUE;
mutex_init(&acb->acb_wait_lock, NULL,
MUTEX_DEFAULT, NULL);
cv_init(&acb->acb_wait_cv, NULL,
CV_DEFAULT, NULL);
}
acb->acb_zb = *zb;
if (pio != NULL) {
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
}
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb->acb_prev = acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_iohits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, iohits);
if (*arc_flags & ARC_FLAG_WAIT) {
mutex_enter(&acb->acb_wait_lock);
while (acb->acb_wait) {
cv_wait(&acb->acb_wait_cv,
&acb->acb_wait_lock);
}
rc = acb->acb_wait_error;
mutex_exit(&acb->acb_wait_lock);
mutex_destroy(&acb->acb_wait_lock);
cv_destroy(&acb->acb_wait_cv);
kmem_free(acb, sizeof (arc_callback_t));
}
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_TRUE);
if (done && !no_buf) {
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb, &hdr->b_birth);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
arc_buf_destroy_impl(buf);
buf = NULL;
(void) remove_reference(hdr, private);
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, hits);
*arc_flags |= ARC_FLAG_CACHED;
goto done;
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
if (hash_lock != NULL)
mutex_exit(hash_lock);
rc = SET_ERROR(ENOENT);
goto done;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
arc_callback_t *acb = kmem_zalloc(
sizeof (arc_callback_t), KM_SLEEP);
acb->acb_wait = B_TRUE;
mutex_init(&acb->acb_wait_lock, NULL,
MUTEX_DEFAULT, NULL);
cv_init(&acb->acb_wait_cv, NULL, CV_DEFAULT,
NULL);
acb->acb_zio_head =
hdr->b_l1hdr.b_acb->acb_zio_head;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb->acb_prev = acb;
hdr->b_l1hdr.b_acb = acb;
mutex_exit(hash_lock);
mutex_enter(&acb->acb_wait_lock);
while (acb->acb_wait) {
cv_wait(&acb->acb_wait_cv,
&acb->acb_wait_lock);
}
mutex_exit(&acb->acb_wait_lock);
mutex_destroy(&acb->acb_wait_lock);
cv_destroy(&acb->acb_wait_cv);
kmem_free(acb, sizeof (arc_callback_t));
goto top;
}
}
if (*arc_flags & ARC_FLAG_UNCACHED) {
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
if (!encrypted_read)
alloc_flags |= ARC_HDR_ALLOC_LINEAR;
}
/*
* Take an additional reference for IO_IN_PROGRESS. It stops
* arc_access() from putting this header, which has no buffers
* and hence no other references yet is obviously non-evictable,
* onto the evictable list of the MRU or MFU state.
*/
add_reference(hdr, hdr);
if (!embedded_bp)
arc_access(hdr, *arc_flags, B_FALSE);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* The assertion below verifies that the underlying buffer
* was not squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
done:
if (done)
done(NULL, zb, bp, buf, private);
if (pio && rc != 0) {
zio_t *zio = zio_null(pio, spa, NULL, NULL, NULL, zio_flags);
zio->io_error = rc;
zio_nowait(zio);
}
goto out;
}
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
/*
* Helper function for arc_prune_async(); it is responsible for safely
* handling the execution of a registered arc_prune_func_t.
*/
static void
arc_prune_task(void *ptr)
{
arc_prune_t *ap = (arc_prune_t *)ptr;
arc_prune_func_t *func = ap->p_pfunc;
if (func != NULL)
func(ap->p_adjust, ap->p_private);
zfs_refcount_remove(&ap->p_refcnt, func);
}
/*
* Notify registered consumers they must drop holds on a portion of the ARC
* buffers they reference. This provides a mechanism to ensure the ARC can
* honor the metadata limit and reclaim otherwise pinned ARC buffers.
*
* This operation is performed asynchronously so it may be safely called
* in the context of the arc_reclaim_thread(). A reference is taken here
* for each registered arc_prune_t and the arc_prune_task() is responsible
* for releasing it once the registered arc_prune_func_t has completed.
*/
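/*
 * Note on the refcount gating below: a count of 1 means only
 * arc_prune_list holds the callback, so it may be dispatched; a
 * count of 2 or more means a previous dispatch is still pending or
 * running, so it is skipped rather than queued again.
 */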
static void
arc_prune_async(uint64_t adjust)
{
arc_prune_t *ap;
mutex_enter(&arc_prune_mtx);
for (ap = list_head(&arc_prune_list); ap != NULL;
ap = list_next(&arc_prune_list, ap)) {
if (zfs_refcount_count(&ap->p_refcnt) >= 2)
continue;
zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
ap->p_adjust = adjust;
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
ap, TQ_SLEEP) == TASKQID_INVALID) {
zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
continue;
}
ARCSTAT_BUMP(arcstat_prune);
}
mutex_exit(&arc_prune_mtx);
}
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (e.g. prefetch) or has some other reference (e.g. a dedup-ed,
* dmu_sync-ed block). The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) ||
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held; we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_buf != buf || !ARC_BUF_LAST(buf)) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
VERIFY3S(remove_reference(hdr, tag), >, 0);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (ARC_BUF_SHARED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, 0);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared, so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size[type],
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf, free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
(void) zfs_refcount_add_many(&arc_anon->arcs_size[type],
arc_buf_size(buf), buf);
} else {
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
return (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
/*
* If we're reexecuting this zio because the pool suspended, then
* cleanup any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (ARC_BUF_SHARED(buf)) {
arc_unshare_buf(hdr, buf);
} else {
ASSERT(!arc_buf_is_shared(buf));
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr)) {
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
add_reference(hdr, hdr); /* For IO_IN_PROGRESS. */
}
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_PROTECTED);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
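/*
 * Summary of the cases below (an editorial sketch, not part of the
 * original source):
 *   encrypted buf                 -> copy zio->io_abd into b_rabd
 *   sharing not possible/desired  -> copy into b_pabd (or b_rabd when
 *                                    the BP itself is encrypted)
 *   otherwise                     -> share buf->b_data with the hdr
 */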
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (!(HDR_UNCACHED(hdr) ||
abd_size_alloc_linear(arc_buf_size(buf))) ||
!arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, buf);
ASSERT(ARC_BUF_LAST(buf));
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, 0, B_FALSE);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
}
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t uncached, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *done,
void *private, zio_priority_t priority, int zio_flags,
const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
if (uncached)
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
else if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale, so free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (ARC_BUF_SHARED(buf)) {
arc_unshare_buf(hdr, buf);
} else {
ASSERT(!arc_buf_is_shared(buf));
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_done, callback, priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
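/*
 * A reservation much larger than the current target suggests arc_c is
 * undersized; grow it toward arc_c_max so the check below can pass,
 * but only for reservations of a meaningful size (reserve * 4 must
 * exceed 32M).
 */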
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in-flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)
(zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) -
arc_loaned_bytes), 0);
/*
* Writes will almost always require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
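/*
 * Worked example (illustrative, assuming the default tunables
 * zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent = 25
 * and zfs_arc_pool_dirty_percent = 20): with rarc_c = 8 GiB, we
 * throttle only when total_dirty exceeds 4 GiB, anonymous data exceeds
 * 2 GiB, and this pool accounts for more than 20% of that anonymous
 * data.
 */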
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *data, kstat_named_t *metadata,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
data->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]);
metadata->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
size->value.ui64 = data->value.ui64 + metadata->value.ui64;
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_iohits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_iohits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_iohits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_iohits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_iohits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_uncached_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncached_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
wmsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_data,
&as->arcstat_anon_metadata,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_data,
&as->arcstat_mru_metadata,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_data,
&as->arcstat_mru_ghost_metadata,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_data,
&as->arcstat_mfu_metadata,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_data,
&as->arcstat_mfu_ghost_metadata,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
arc_kstat_update_state(arc_uncached,
&as->arcstat_uncached_size,
&as->arcstat_uncached_data,
&as->arcstat_uncached_metadata,
&as->arcstat_uncached_evictable_data,
&as->arcstat_uncached_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
wmsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_predictive_prefetch);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_iohit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
as->arcstat_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_prescient_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_demand_iohit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
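/*
* Illustrative example (not from the original source): with 8
* sublists, a header whose hash ends in 0x1d lands in sublist
* 0x1d % 8 = 5, so only the hash's low-order bits determine placement.
*/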
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (u_longlong_t)(value)); \
} \
} while (0)
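/*
* Usage sketch (illustrative): WARN_IF_TUNING_IGNORED(zfs_arc_min,
* arc_c_min, verbose) logs "ignoring tunable zfs_arc_min (using <value>
* instead)" when a non-zero zfs_arc_min was requested but differs from
* the arc_c_min actually in effect.
*/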
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
if (arc_dnode_limit > arc_c_max)
arc_dnode_limit = arc_c_max;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 0 - <all physical memory> */
arc_dnode_limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_c_max / 100;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_limit, verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if (zfs_arc_lotsfree_percent <= 100)
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(zfs_arc_sys_free, allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
static void
arc_state_multilist_init(multilist_t *ml,
multilist_sublist_index_func_t *index_func, int *maxcountp)
{
multilist_create(ml, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), index_func);
*maxcountp = MAX(*maxcountp, multilist_get_num_sublists(ml));
}
static void
arc_state_init(void)
{
int num_sublists = 0;
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. A special index function asserts that.
*/
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
arc_state_l2c_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
arc_state_l2c_multilist_index_func, &num_sublists);
/*
* Keep track of the number of markers needed to reclaim buffers from
* any ARC state. The markers will be pre-allocated so as to minimize
* the number of memory allocations performed by the eviction thread.
*/
arc_state_evict_marker_count = num_sublists;
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_iohits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_uncached_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
wmsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
wmsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
arc_uncached->arcs_state = ARC_STATE_UNCACHED;
}
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_iohits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_iohits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_uncached_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
wmsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
wmsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
* environment before the module was loaded, don't block setting the
* maximum because it is less than arc_c_min; instead, reset arc_c_min
* to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
/*
* 32-bit fixed point fractions of metadata from total ARC size,
* MRU data from all data and MRU metadata from all metadata.
*/
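/*
* Illustrative arithmetic (not from the original source): a fraction f
* is stored as (uint64_t)(f * (1ULL << 32)), so 25% is 1ULL << 30 and
* 50% is 1ULL << 31; multiplying a byte count by the fraction and
* shifting right by 32 recovers f * bytes using only integer math.
*/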
arc_meta = (1ULL << 32) / 4; /* Metadata is 25% of arc_c. */
arc_pd = (1ULL << 32) / 2; /* Data MRU is 50% of data. */
arc_pm = (1ULL << 32) / 2; /* Metadata MRU is 50% of metadata. */
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_limit = arc_c_max * percent / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, let's try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_state_evict_markers =
arc_state_alloc_markers(arc_state_evict_marker_count);
arc_evict_zthr = zthr_create_timer("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, SEC2NSEC(1), defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
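/*
* Worked example (illustrative, assuming the default percentages): on
* a 64-bit machine with 64 GiB of RAM, zfs_dirty_data_max_max becomes
* MIN(4 GiB, 25% of 64 GiB) = 4 GiB and zfs_dirty_data_max becomes
* MIN(10% of 64 GiB, 4 GiB) = 4 GiB.
*/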
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus, under normal operation,
* dp_wrlog_total could grow to twice the size of
* zfs_dirty_data_max.
*/
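/*
* Illustrative arithmetic: with zfs_dirty_data_max = 4 GiB, this
* default works out to zfs_wrlog_data_max = 8 GiB.
*/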
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_remove_head(&arc_prune_list)) != NULL) {
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
arc_state_free_markers(arc_state_evict_markers,
arc_state_evict_marker_count);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back to
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially lower read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself serves as a buffer against ARC eviction. If a compressible
* buffer is found during scanning and selected for writing to an L2ARC
* device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of
* these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
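*
* For example (an illustrative sketch, not from the original text):
* with l2arc_write_max = 8 MiB and l2arc_headroom = 2, a feed cycle
* scans up to 2 * 8 MiB = 16 MiB from the tail of each eligible ARC
* list, writes at most 8 MiB (plus l2arc_write_boost while the device
* is warming up), and then sleeps for l2arc_feed_secs seconds.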
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write an "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked-lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved, but that is an optimization
* rather than a correctness requirement. The log block also includes a
* pointer to the previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. With a single
* list, we would learn the address of the next log block only once the
* current block had been read in completely, keeping the device's I/O
* queue just one operation deep and incurring a full I/O round trip of
* latency per block. Having two lists allows us to fetch two log blocks
* ahead of where we are currently rebuilding L2ARC buffers.
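*
* For example, while the rebuild code is still parsing log block N from
* one chain, the read of log block N+1 from the other chain can already
* be in flight (see l2arc_log_blk_read() below), keeping the device
* queue two operations deep instead of one.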
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot amend already-written log blocks to remove
* entries for buffers that were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA together
* with the birth TXG uniquely identifies a block in space and time - once
* created, a block is immutable on disk. The worst we have done is waste
* some time and memory at l2arc rebuild reconstructing outdated ARC
* entries that will get dropped from the l2arc as it is being updated
* with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
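/*
* A worked example of the sizing logic in l2arc_write_size() below (all
* numbers illustrative, not authoritative defaults): with
* l2arc_write_max = 8MB on a still-cold system (arc_warm == B_FALSE) and
* l2arc_write_boost = 8MB, the base write size is 16MB. The worst-case
* log block overhead for those bytes is then added, and if the cache
* vdev supports TRIM and l2arc_trim_ahead = 100, a further
* MAX(64MB, 100% of the running size) is reserved for trimming ahead.
* Finally the result is clamped to a quarter of the device and rounded
* up to the vdev's ashift.
*/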
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
- cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
- "be greater than zero, resetting it to the default (%d)",
- L2ARC_WRITE_SIZE);
+ cmn_err(CE_NOTE, "l2arc_write_max must be greater than zero, "
+ "resetting it to the default (%d)", L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/* We need to add in the worst case scenario of log block overhead. */
size += l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size by 64MB or (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
size += MAX(64 * 1024 * 1024,
(size * l2arc_trim_ahead) / 100);
}
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(), otherwise infinite
* iteration can occur.
*/
- if (size > dev->l2ad_end - dev->l2ad_start) {
- cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
- "plus the overhead of log blocks (persistent L2ARC, "
- "%llu bytes) exceeds the size of the cache device "
- "(guid %llu), resetting them to the default (%d)",
- (u_longlong_t)l2arc_log_blk_overhead(size, dev),
- (u_longlong_t)dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
+ size = MIN(size, (dev->l2ad_end - dev->l2ad_start) / 4);
- size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
-
- if (l2arc_trim_ahead > 1) {
- cmn_err(CE_NOTE, "l2arc_trim_ahead set to 1");
- l2arc_trim_ahead = 1;
- }
-
- if (arc_warm == B_FALSE)
- size += l2arc_write_boost;
-
- size += l2arc_log_blk_overhead(size, dev);
- if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) {
- size += MAX(64 * 1024 * 1024,
- (size * l2arc_trim_ahead) / 100);
- }
- }
+ size = P2ROUNDUP(size, 1ULL << dev->l2ad_vdev->vdev_ashift);
return (size);
}
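/*
* A worked example of the interval policy in l2arc_write_interval()
* below (tunable values illustrative, not authoritative defaults): with
* hz = 1000, l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200, a feed
* cycle that wrote more than half of its target (with l2arc_feed_again
* set) is rescheduled 200 ticks after it began, while a mostly-idle
* cycle waits the full 1000 ticks. The MAX/MIN clamp keeps the next
* wakeup from landing in the past when the write itself outlasts the
* interval.
*/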
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
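*
* For example, with cache devices A, B and C attached, successive calls
* return A, B, C, A, ... while skipping any device that is dead, still
* rebuilding, or pending a whole-device TRIM.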
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
ASSERT3P(next, !=, NULL);
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
l2arc_data_free_t *df;
mutex_enter(&l2arc_free_on_write_mtx);
while ((df = list_remove_head(l2arc_free_on_write)) != NULL) {
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* in which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2-eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
memset(l2dhdr, 0,
dev->l2ad_dev_hdr_asize);
} else {
memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled, we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because the zio is either re-used
* to re-read the block in the error case, or passed to
* arc_read_done(), which needs the real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
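*
* A worked example (illustrative): a write_sz of 8MB corresponds to
* 8MB >> SPA_MINBLOCKSHIFT = 16384 potential log entries; with
* l2ad_log_entries = 1022 that is ceil(16384 / 1022) = 17 log blocks,
* each charged at the aligned on-disk size of l2arc_log_blk_phys_t.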
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, or it may span nothing.
* Either way, it clears a region of the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
top:
rerun = B_FALSE;
if (dev->l2ad_hand + distance > dev->l2ad_end) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
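/*
* Illustrative example: if the hand sits 10MB short of l2ad_end and
* distance is 16MB, the first pass evicts up to l2ad_end, the hands
* are then reset to l2ad_start, and the rerun evicts a full
* 'distance' worth of space from the start of the device.
*/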
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_simple() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_simple() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (i.e.
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* Only rerun when we are not evicting all buffers; otherwise we could
* iterate unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it
* and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
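/*
* Illustrative numbers for the zero-padding above: a buffer with
* psize = 5120 bytes destined for an ashift=12 cache vdev has
* asize = 8192, so the copied data is followed by 3072 bytes of zeros
* to exactly match the on-disk allocation.
*/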
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
/*
* In some cases, we can wind up with size > asize, so
* we need to opt for the larger allocation option here.
*
* (We also need abd_return_buf_copy in all cases because
* modifying the buffer and then returning it with
* arc_return_buf() trips an ASSERT(), and nearly all of the
* compressors write to the buffer before deciding to fail
* compression.)
*/
uint64_t bufsize = MAX(size, asize);
cabd = abd_alloc_for_io(bufsize, ismd);
tmp = abd_borrow_buf(cabd, bufsize);
psize = zio_compress_data(compress, to_write, &tmp, size,
hdr->b_complevel);
if (psize >= asize) {
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, bufsize);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
memset((char *)tmp + psize, 0, bufsize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, bufsize);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The headroom_boost is an in-out parameter used to maintain headroom boost
* state between calls to this function.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* If pass == 1 or 3, we cache MRU metadata and data
* respectively.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
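/*
* Illustrative numbers: with target_sz = 8MB and l2arc_headroom = 2,
* up to 16MB (by lsize) of list entries are scanned in a pass; if
* compressed ARC is enabled and l2arc_headroom_boost = 200, that
* doubles to 32MB.
*/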
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
/*
* If the allocated size of this buffer plus the max
* size for the pending log block exceeds the evicted
* target size, terminate writing buffers for this run.
*/
if (write_asize + asize +
sizeof (l2arc_log_blk_phys_t) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
if (l2arc_log_blk_insert(dev, hdr)) {
/*
* l2ad_hand will be adjusted in
* l2arc_log_blk_commit().
*/
write_asize +=
l2arc_log_blk_commit(dev, pio, cb);
}
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
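/*
* A worked example for l2arc_hdr_limit_reached() below (values
* illustrative, not authoritative defaults): with arc_c_max = 4GB and
* l2arc_meta_percent = 33, feeding is skipped once the upper bound on
* in-core L2ARC header memory exceeds ~1.3GB. While the ARC is warm the
* limit tracks the current target arc_c instead of arc_c_max, and
* reclaim pressure stops feeding regardless.
*/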
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
static __attribute__((noreturn)) void
l2arc_feed_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* The L2ARC has to hold at least the payload of one log block for
* them to be restored (persistent L2ARC). The payload of a log block
* depends on the number of its log entries. We always write log blocks
* with up to 1022 entries. How many of them are committed or restored
* depends on the size of the L2ARC device: the maximum payload of one
* log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device is
* smaller than that, we reduce the number of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
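/*
* Example (illustrative): on a 4GB cache device,
* (l2ad_end - l2ad_start) >> SPA_MAXBLOCKSHIFT is roughly 256, so only
* 256 of the 1022 log entry slots per block are used, which keeps the
* payload of one full log block restorable on a device this small.
*/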
/*
* Read the device header; if an error is returned, do not rebuild L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* be starting a rebuild inline here, as it would block pool
* import. Instead spa_load_impl will hand that off to an
* async task which will call l2arc_spa_rebuild_start.
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added in the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device and writing new buffers will not invalidate
* all previous content. In the worst case, only buffers that have not had
* their log block written to the device will be lost.
* When onlining the cache device (i.e. offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static __attribute__((noreturn)) void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It starts
* reading the log block chain and restores each block's contents to
* memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* log block restored, include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
kpreempt(KPREEMPT_SYNC);
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* The L2ARC config lock is held as writer by somebody,
* possibly because they are trying to remove us. They
* likely want us to shut down, so after a little
* delay, we check l2ad_rebuild_cancel and retry
* the lock again.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header of the provided L2ARC device and write
* it to dev->l2ad_dev_hdr. On success this function returns 0, otherwise the
* appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* An attempt was made to rebuild a device containing no actual
* dev hdr, or containing a header from some other pool or from
* another version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log
* block address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC block.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
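/*
 * For illustration, a minimal caller-side sketch of the this_io/next_io
 * handoff described above (simplified from the l2arc_rebuild() loop shown
 * earlier; error handling and lbps bookkeeping elided):
 *
 *	zio_t *this_io = NULL, *next_io = NULL;
 *	for (;;) {
 *		if (l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
 *		    this_lb, next_lb, this_io, &next_io) != 0)
 *			break;	(fetch IOs already aborted and cleared)
 *		(... restore this_lb, advance lbps via lb_prev_lbp ...)
 *		this_io = next_io;	(hand the prefetched IO over)
 *		next_io = NULL;
 *	}
 *	if (this_io != NULL)
 *		l2arc_log_blk_fetch_abort(this_io);
 */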
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
* Do all the allocation before grabbing any locks, this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, fill it in and update the flag.
* This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain a newly allocated memory buffer for the IO
* data which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static uint64_t
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf = NULL;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, (void **) &tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
memset(tmpbuf + psize, 0, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
memcpy(tmpbuf, lb, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
return (asize);
}
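/*
 * A hypothetical worked example of the compression outcome above: if the
 * serialized log block (sizeof (*lb)) is 128K and LZ4 shrinks it to
 * psize = 40K, then asize = vdev_psize_to_asize(vd, 40K) < 128K, so the
 * zero-padded compressed copy is written and ZIO_COMPRESS_LZ4 is recorded
 * in lbp_prop; if compression cannot bring asize below sizeof (*lb), the
 * raw block is written with ZIO_COMPRESS_OFF instead.
 */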
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
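/*
 * Typical caller pattern, for illustration (a sketch of how
 * l2arc_write_buffers() is expected to use this; see the commit
 * routine above):
 *
 *	if (dev->l2ad_log_entries > 0 &&
 *	    l2arc_log_blk_insert(dev, hdr))
 *		(void) l2arc_log_blk_commit(dev, pio, cb);
 */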
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we also need to handle the situation in which
* the range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine whether it sits
* between bottom and top.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
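/*
 * Worked example of the looped-around case, with hypothetical small
 * addresses: for bottom = 80 and top = 20 the range wraps, so
 *
 *	l2arc_range_check_overlap(80, 20, 90) == B_TRUE	(after bottom)
 *	l2arc_range_check_overlap(80, 20, 10) == B_TRUE	(before top)
 *	l2arc_range_check_overlap(80, 20, 50) == B_FALSE	(in the gap)
 */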
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
spl_param_get_u64, ZMOD_RW, "Minimum ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
spl_param_get_u64, ZMOD_RW, "Maximum ARC size in bytes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_balance, UINT, ZMOD_RW,
"Balance between metadata and data on ghost hits.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_uint, ZMOD_RW, "Seconds before growing ARC size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim ARC to");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed ARC buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_uint, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, U64, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, U64, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, U64, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, U64, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, U64, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, U64, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, U64, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, U64, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "Minimum bytes of dnodes in ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, UINT, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
"Number of arc_prune threads");
diff --git a/sys/contrib/openzfs/module/zfs/brt.c b/sys/contrib/openzfs/module/zfs/brt.c
index 759bc8d2e2b8..225ddaca1e54 100644
--- a/sys/contrib/openzfs/module/zfs/brt.c
+++ b/sys/contrib/openzfs/module/zfs/brt.c
@@ -1,1753 +1,1737 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2020, 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/brt.h>
#include <sys/brt_impl.h>
#include <sys/ddt.h>
#include <sys/bitmap.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>
/*
* Block Cloning design.
*
* Block Cloning allows one to manually clone a file (or a subset of its blocks)
* into another (or the same) file by just creating additional references to
* the data blocks without copying the data itself. Those references are kept
* in the Block Reference Tables (BRTs).
*
* In many ways this is similar to the existing deduplication, but there are
* some important differences:
*
* - Deduplication is automatic and Block Cloning is not - one has to use a
* dedicated system call(s) to clone the given file/blocks.
* - Deduplication keeps all data blocks in its table, even those referenced
* just once. Block Cloning creates an entry in its tables only when there
* are at least two references to the given data block. If the block was
* never explicitly cloned or the second to last reference was dropped,
* there will be neither space nor performance overhead.
* - Deduplication needs data to work - one needs to pass real data to the
* write(2) syscall, so the hash can be calculated. Block Cloning doesn't
* require
* data, just block pointers to the data, so it is extremely fast, as we pay
* neither the cost of reading the data, nor the cost of writing the data -
* we operate exclusively on metadata.
* - If the D (dedup) bit is not set in the block pointer, it means that
* the block is not in the dedup table (DDT) and we won't consult the DDT
* when we need to free the block. Block Cloning must be consulted on every
* free, because we cannot modify the source BP (e.g. by setting something
* similar to the D bit), thus we have no hint if the block is in the
* Block Reference Table (BRT), so we need to look into the BRT. There is
* an optimization in place that allows us to eliminate the majority of BRT
* lookups which is described below in the "Minimizing free penalty" section.
* - The BRT entry is much smaller than the DDT entry - for BRT we only store
* 64bit offset and 64bit reference counter.
* - Dedup keys are cryptographic hashes, so two blocks that are close to each
* other on disk are most likely in totally different parts of the DDT.
* The BRT entry keys are offsets into a single top-level VDEV, so data blocks
* from one file should have BRT entries close to each other.
* - Scrub will only do a single pass over a block that is referenced multiple
* times in the DDT. Unfortunately it is not currently (if at all) possible
* with Block Cloning, and a block referenced multiple times will be scrubbed
* multiple times. The new, sorted scrub should be able to eliminate
* duplicated reads given enough memory.
* - Deduplication requires a cryptographically strong hash as a checksum or
* additional data verification. Block Cloning works with any checksum
* algorithm or even with checksumming disabled.
*
* As mentioned above, the BRT entries are much smaller than the DDT entries.
* To uniquely identify a block we just need its vdev id and offset. We also
* need to maintain a reference counter. The vdev id will often repeat, as there
* is a small number of top-level VDEVs and a large number of blocks stored in
* each VDEV. We take advantage of that to reduce the BRT entry size further by
* maintaining one BRT for each top-level VDEV, so we can then have only offset
* and counter as the BRT entry.
*
* Minimizing free penalty.
*
* Block Cloning allows creating additional references to any existing block.
* When we free a block there is no hint in the block pointer whether the block
* was cloned or not, so on each free we have to check if there is a
* corresponding entry in the BRT or not. If there is, we need to decrease
* the reference counter. Doing BRT lookup on every free can potentially be
* expensive by requiring additional I/Os if the BRT doesn't fit into memory.
* This is the main problem with deduplication, so we've learned our lesson and
* try not to repeat the same mistake here. How do we do that? We divide each
* top-level VDEV into 16MB regions. For each region we maintain a counter that
* is a sum of all the BRT entries that have offsets within the region. This
* creates the entries count array of 16-bit numbers for each top-level VDEV.
* The entries count array is always kept in memory and updated on disk in the
* same transaction group as the BRT updates to keep everything in-sync. We can
* keep the array in memory, because it is very small. With 16MB regions and
* 1TB VDEV the array requires only 128kB of memory (we may decide to decrease
* the region size even further in the future). Now, when we want to free
* a block, we first consult the array. If the counter for the whole region is
* zero, there is no need to look for the BRT entry, as there isn't one for
* sure. If the counter for the region is greater than zero, only then will we
* do a BRT lookup, and if an entry is found we will decrease the reference
* counter in the BRT entry and in the entry counters array.
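*
* For illustration, the fast path on free reduces to a sketch like this
* (compare brt_vdev_lookup() below; names as in this file):
*
*	idx = bre->bre_offset / brt->brt_rangesize;
*	if (brt_vdev_entcount_get(brtvd, idx) == 0)
*		return (B_FALSE);	(definitely no BRT entry)
*	(otherwise fall through to the expensive BRT ZAP lookup)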
*
* The entry counters array is small, but can potentially be larger for very
* large VDEVs or smaller regions. In this case we don't want to rewrite the
* entire array on every change. We then divide the array into 32kB blocks and
* keep a bitmap of dirty blocks within a transaction group. When we sync the
* transaction group we update only the parts of the entry counters array
* that were modified. Note: Keeping track of the dirty parts of the entry
* counters array is implemented, but updating only parts of the array on disk
* is not yet implemented - for now we will update the entire array if there
* was any change.
*
* The implementation tries to be economical: if the BRT is not used, or no
* longer used, there will be no entries in the MOS and no additional memory
* used (e.g. the entry counters array is only allocated if needed).
*
* Interaction between Deduplication and Block Cloning.
*
* If both functionalities are in use, we could end up with a block that is
* referenced multiple times in both DDT and BRT. When we free one of the
* references we couldn't tell where it belongs, so we would have to decide
* which table takes precedence: do we first clear DDT references or BRT
* references? To avoid this dilemma BRT cooperates with DDT - if a given block
* is being cloned using BRT and the BP has the D (dedup) bit set, BRT will
* look up the DDT entry instead and increase the counter there. No BRT entry
* will be created for a block which has the D (dedup) bit set.
* BRT may be more efficient for manual deduplication, but if the block is
* already in the DDT, then creating an additional BRT entry would be less
* efficient. This clever idea was proposed by Allan Jude.
*
* Block Cloning across datasets.
*
* Block Cloning is not limited to cloning blocks within the same dataset.
* It is possible (and very useful) to clone blocks between different datasets.
* One use case is recovering files from snapshots. By cloning the files into
* a dataset we need no additional storage. Without Block Cloning we would need
* additional space for those files.
* Another interesting use case is moving the files between datasets
* (copying the file content to the new dataset and removing the source file).
* In that case Block Cloning will only be used briefly, because the BRT entries
* will be removed when the source is removed.
- * Note: currently it is not possible to clone blocks between encrypted
- * datasets, even if those datasets use the same encryption key (this includes
- * snapshots of encrypted datasets). Cloning blocks between datasets that use
- * the same keys should be possible and should be implemented in the future.
+ * Block Cloning across encrypted datasets is supported as long as both
+ * datasets share the same master key (e.g. snapshots and clones).
*
* Block Cloning flow through ZFS layers.
*
* Note: Block Cloning can be used both for cloning file system blocks and ZVOL
* blocks. As of this writing no interface is implemented that allows for block
* cloning within a ZVOL.
* FreeBSD and Linux provide the copy_file_range(2) system call and we will
* use it for block cloning.
*
* ssize_t
* copy_file_range(int infd, off_t *inoffp, int outfd, off_t *outoffp,
* size_t len, unsigned int flags);
*
* Even though offsets and length represent bytes, they have to be
* block-aligned or we will return an error so the upper layer can fall back
* to the generic mechanism that will just copy the data.
* Using copy_file_range(2) will call OS-independent zfs_clone_range() function.
* This function was implemented based on zfs_write(), but instead of writing
* the given data we first read block pointers using the new dmu_read_l0_bps()
* function from the source file. Once we have BPs from the source file we call
* the dmu_brt_clone() function on the destination file. This function
* allocates BPs for us. We iterate over all source BPs. If the given BP is
* a hole or an embedded block, we just copy the BP as-is. If it points to
* real data we place this BP on the BRT pending list using the
* brt_pending_add() function.
*
* We use this pending list to keep track of all BPs that got new references
* within this transaction group.
*
* Some special cases to consider and how we address them:
* - The block we want to clone may have been created within the same
* transaction group that we are trying to clone. Such a block has no BP
* allocated yet, so it cannot be immediately cloned. We return EAGAIN.
* - The block we want to clone may have been modified within the same
* transaction group. We return EAGAIN.
* - A block may be cloned multiple times during one transaction group (that's
* why the pending list is actually a tree and not an append-only list - this
* way we can figure out faster whether this block is being cloned for the
* first time in this txg or a consecutive time).
* - A block may be cloned and freed within the same transaction group
* (see dbuf_undirty()).
* - A block may be cloned and within the same transaction group the clone
* can be cloned again (see dmu_read_l0_bps()).
* - A file might have been deleted, but the caller still has a file descriptor
* open to this file and clones it.
*
* When we free a block we have an additional step in the ZIO pipeline where we
* call the zio_brt_free() function. We then call the brt_entry_decref()
* that loads the corresponding BRT entry (if one exists) and decreases
* reference counter. If this is not the last reference we will stop ZIO
* pipeline here. If this is the last reference or the block is not in the
* BRT, we continue the pipeline and free the block as usual.
*
* At the beginning of spa_sync() where there can be no more block cloning,
* but before issuing frees we call brt_pending_apply(). This function applies
* all the new clones to the BRT table - we load BRT entries and update
* reference counters. To sync new BRT entries to disk, we use brt_sync()
* function. This function will sync all dirty per-top-level-vdev BRTs,
* the entry counters arrays, etc.
*
* Block Cloning and ZIL.
*
* Every clone operation is divided into chunks (similar to write) and each
* chunk is cloned in a separate transaction. The chunk size is determined by
* how many BPs we can fit into a single ZIL entry.
* Replaying a clone operation is different from the regular clone operation,
* as when we log clone operations we cannot use the source object - it may
* reside on a different dataset, so we log the BPs we want to clone.
* The ZIL is replayed when we mount the given dataset, not when the pool is
* imported. Taking this into account it is possible that the pool is imported
* without mounting datasets and the source dataset is destroyed before the
* destination dataset is mounted and its ZIL replayed.
* To address this situation we leverage zil_claim() mechanism where ZFS will
* parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
* entries, we will bump reference counters for their BPs in the BRT. Then
* on mount and ZIL replay we bump the reference counters once more, while the
* first references are dropped during ZIL destroy by zil_free_clone_range().
* It is possible that after zil_claim() we never mount the destination, so
* we never replay its ZIL and just destroy it. In this case the references
* taken during claim will be dropped by zil_free_clone_range(), since the
* cloning is never going to take place.
*/
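/*
 * For illustration only, a minimal hypothetical userland sketch of cloning
 * a whole file via copy_file_range(2), per the block-alignment note in the
 * comment above (error handling elided):
 *
 *	off_t in_off = 0, out_off = 0;
 *	ssize_t n;
 *	while (len > 0 && (n = copy_file_range(infd, &in_off,
 *	    outfd, &out_off, len, 0)) > 0)
 *		len -= n;
 */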
static kmem_cache_t *brt_entry_cache;
static kmem_cache_t *brt_pending_entry_cache;
/*
* Enable/disable prefetching of BRT entries that we are going to modify.
*/
int zfs_brt_prefetch = 1;
#ifdef ZFS_DEBUG
#define BRT_DEBUG(...) do { \
if ((zfs_flags & ZFS_DEBUG_BRT) != 0) { \
__dprintf(B_TRUE, __FILE__, __func__, __LINE__, __VA_ARGS__); \
} \
} while (0)
#else
#define BRT_DEBUG(...) do { } while (0)
#endif
int brt_zap_leaf_blockshift = 12;
int brt_zap_indirect_blockshift = 12;
static kstat_t *brt_ksp;
typedef struct brt_stats {
kstat_named_t brt_addref_entry_in_memory;
kstat_named_t brt_addref_entry_not_on_disk;
kstat_named_t brt_addref_entry_on_disk;
kstat_named_t brt_addref_entry_read_lost_race;
kstat_named_t brt_decref_entry_in_memory;
kstat_named_t brt_decref_entry_loaded_from_disk;
kstat_named_t brt_decref_entry_not_in_memory;
kstat_named_t brt_decref_entry_not_on_disk;
kstat_named_t brt_decref_entry_read_lost_race;
kstat_named_t brt_decref_entry_still_referenced;
kstat_named_t brt_decref_free_data_later;
kstat_named_t brt_decref_free_data_now;
kstat_named_t brt_decref_no_entry;
} brt_stats_t;
static brt_stats_t brt_stats = {
{ "addref_entry_in_memory", KSTAT_DATA_UINT64 },
{ "addref_entry_not_on_disk", KSTAT_DATA_UINT64 },
{ "addref_entry_on_disk", KSTAT_DATA_UINT64 },
{ "addref_entry_read_lost_race", KSTAT_DATA_UINT64 },
{ "decref_entry_in_memory", KSTAT_DATA_UINT64 },
{ "decref_entry_loaded_from_disk", KSTAT_DATA_UINT64 },
{ "decref_entry_not_in_memory", KSTAT_DATA_UINT64 },
{ "decref_entry_not_on_disk", KSTAT_DATA_UINT64 },
{ "decref_entry_read_lost_race", KSTAT_DATA_UINT64 },
{ "decref_entry_still_referenced", KSTAT_DATA_UINT64 },
{ "decref_free_data_later", KSTAT_DATA_UINT64 },
{ "decref_free_data_now", KSTAT_DATA_UINT64 },
{ "decref_no_entry", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t brt_addref_entry_in_memory;
wmsum_t brt_addref_entry_not_on_disk;
wmsum_t brt_addref_entry_on_disk;
wmsum_t brt_addref_entry_read_lost_race;
wmsum_t brt_decref_entry_in_memory;
wmsum_t brt_decref_entry_loaded_from_disk;
wmsum_t brt_decref_entry_not_in_memory;
wmsum_t brt_decref_entry_not_on_disk;
wmsum_t brt_decref_entry_read_lost_race;
wmsum_t brt_decref_entry_still_referenced;
wmsum_t brt_decref_free_data_later;
wmsum_t brt_decref_free_data_now;
wmsum_t brt_decref_no_entry;
} brt_sums;
#define BRTSTAT_BUMP(stat) wmsum_add(&brt_sums.stat, 1)
static int brt_entry_compare(const void *x1, const void *x2);
static int brt_pending_entry_compare(const void *x1, const void *x2);
static void
brt_rlock(brt_t *brt)
{
rw_enter(&brt->brt_lock, RW_READER);
}
static void
brt_wlock(brt_t *brt)
{
rw_enter(&brt->brt_lock, RW_WRITER);
}
static void
brt_unlock(brt_t *brt)
{
rw_exit(&brt->brt_lock);
}
static uint16_t
brt_vdev_entcount_get(const brt_vdev_t *brtvd, uint64_t idx)
{
ASSERT3U(idx, <, brtvd->bv_size);
- if (brtvd->bv_need_byteswap) {
+ if (unlikely(brtvd->bv_need_byteswap)) {
return (BSWAP_16(brtvd->bv_entcount[idx]));
} else {
return (brtvd->bv_entcount[idx]);
}
}
static void
brt_vdev_entcount_set(brt_vdev_t *brtvd, uint64_t idx, uint16_t entcnt)
{
ASSERT3U(idx, <, brtvd->bv_size);
- if (brtvd->bv_need_byteswap) {
+ if (unlikely(brtvd->bv_need_byteswap)) {
brtvd->bv_entcount[idx] = BSWAP_16(entcnt);
} else {
brtvd->bv_entcount[idx] = entcnt;
}
}
static void
brt_vdev_entcount_inc(brt_vdev_t *brtvd, uint64_t idx)
{
uint16_t entcnt;
ASSERT3U(idx, <, brtvd->bv_size);
entcnt = brt_vdev_entcount_get(brtvd, idx);
ASSERT(entcnt < UINT16_MAX);
brt_vdev_entcount_set(brtvd, idx, entcnt + 1);
}
static void
brt_vdev_entcount_dec(brt_vdev_t *brtvd, uint64_t idx)
{
uint16_t entcnt;
ASSERT3U(idx, <, brtvd->bv_size);
entcnt = brt_vdev_entcount_get(brtvd, idx);
ASSERT(entcnt > 0);
brt_vdev_entcount_set(brtvd, idx, entcnt - 1);
}
#ifdef ZFS_DEBUG
static void
-brt_vdev_dump(brt_t *brt)
+brt_vdev_dump(brt_vdev_t *brtvd)
{
- brt_vdev_t *brtvd;
- uint64_t vdevid;
-
- if ((zfs_flags & ZFS_DEBUG_BRT) == 0) {
- return;
- }
-
- if (brt->brt_nvdevs == 0) {
- zfs_dbgmsg("BRT empty");
- return;
- }
-
- zfs_dbgmsg("BRT vdev dump:");
- for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
- uint64_t idx;
+ uint64_t idx;
- brtvd = &brt->brt_vdevs[vdevid];
- zfs_dbgmsg(" vdevid=%llu/%llu meta_dirty=%d entcount_dirty=%d "
- "size=%llu totalcount=%llu nblocks=%llu bitmapsize=%zu\n",
- (u_longlong_t)vdevid, (u_longlong_t)brtvd->bv_vdevid,
- brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty,
- (u_longlong_t)brtvd->bv_size,
- (u_longlong_t)brtvd->bv_totalcount,
- (u_longlong_t)brtvd->bv_nblocks,
- (size_t)BT_SIZEOFMAP(brtvd->bv_nblocks));
- if (brtvd->bv_totalcount > 0) {
- zfs_dbgmsg(" entcounts:");
- for (idx = 0; idx < brtvd->bv_size; idx++) {
- if (brt_vdev_entcount_get(brtvd, idx) > 0) {
- zfs_dbgmsg(" [%04llu] %hu",
- (u_longlong_t)idx,
- brt_vdev_entcount_get(brtvd, idx));
- }
+ zfs_dbgmsg(" BRT vdevid=%llu meta_dirty=%d entcount_dirty=%d "
+ "size=%llu totalcount=%llu nblocks=%llu bitmapsize=%zu\n",
+ (u_longlong_t)brtvd->bv_vdevid,
+ brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty,
+ (u_longlong_t)brtvd->bv_size,
+ (u_longlong_t)brtvd->bv_totalcount,
+ (u_longlong_t)brtvd->bv_nblocks,
+ (size_t)BT_SIZEOFMAP(brtvd->bv_nblocks));
+ if (brtvd->bv_totalcount > 0) {
+ zfs_dbgmsg(" entcounts:");
+ for (idx = 0; idx < brtvd->bv_size; idx++) {
+ uint16_t entcnt = brt_vdev_entcount_get(brtvd, idx);
+ if (entcnt > 0) {
+ zfs_dbgmsg(" [%04llu] %hu",
+ (u_longlong_t)idx, entcnt);
}
}
- if (brtvd->bv_entcount_dirty) {
- char *bitmap;
+ }
+ if (brtvd->bv_entcount_dirty) {
+ char *bitmap;
- bitmap = kmem_alloc(brtvd->bv_nblocks + 1, KM_SLEEP);
- for (idx = 0; idx < brtvd->bv_nblocks; idx++) {
- bitmap[idx] =
- BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.';
- }
- bitmap[idx] = '\0';
- zfs_dbgmsg(" bitmap: %s", bitmap);
- kmem_free(bitmap, brtvd->bv_nblocks + 1);
+ bitmap = kmem_alloc(brtvd->bv_nblocks + 1, KM_SLEEP);
+ for (idx = 0; idx < brtvd->bv_nblocks; idx++) {
+ bitmap[idx] =
+ BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.';
}
+ bitmap[idx] = '\0';
+ zfs_dbgmsg(" dirty: %s", bitmap);
+ kmem_free(bitmap, brtvd->bv_nblocks + 1);
}
}
#endif
static brt_vdev_t *
brt_vdev(brt_t *brt, uint64_t vdevid)
{
brt_vdev_t *brtvd;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
if (vdevid < brt->brt_nvdevs) {
brtvd = &brt->brt_vdevs[vdevid];
} else {
brtvd = NULL;
}
return (brtvd);
}
static void
brt_vdev_create(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
char name[64];
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT0(brtvd->bv_mos_brtvdev);
ASSERT0(brtvd->bv_mos_entries);
ASSERT(brtvd->bv_entcount != NULL);
ASSERT(brtvd->bv_size > 0);
ASSERT(brtvd->bv_bitmap != NULL);
ASSERT(brtvd->bv_nblocks > 0);
brtvd->bv_mos_entries = zap_create_flags(brt->brt_mos, 0,
ZAP_FLAG_HASH64 | ZAP_FLAG_UINT64_KEY, DMU_OTN_ZAP_METADATA,
brt_zap_leaf_blockshift, brt_zap_indirect_blockshift, DMU_OT_NONE,
0, tx);
VERIFY(brtvd->bv_mos_entries != 0);
BRT_DEBUG("MOS entries created, object=%llu",
(u_longlong_t)brtvd->bv_mos_entries);
/*
* We allocate a DMU buffer to store the bv_entcount[] array.
* We will keep the array size (bv_size) and cumulative count for all
* bv_entcount[]s (bv_totalcount) in the bonus buffer.
*/
brtvd->bv_mos_brtvdev = dmu_object_alloc(brt->brt_mos,
DMU_OTN_UINT64_METADATA, BRT_BLOCKSIZE,
DMU_OTN_UINT64_METADATA, sizeof (brt_vdev_phys_t), tx);
VERIFY(brtvd->bv_mos_brtvdev != 0);
BRT_DEBUG("MOS BRT VDEV created, object=%llu",
(u_longlong_t)brtvd->bv_mos_brtvdev);
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
VERIFY0(zap_add(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev, tx));
BRT_DEBUG("Pool directory object created, object=%s", name);
spa_feature_incr(brt->brt_spa, SPA_FEATURE_BLOCK_CLONING, tx);
}
static void
brt_vdev_realloc(brt_t *brt, brt_vdev_t *brtvd)
{
vdev_t *vd;
uint16_t *entcount;
ulong_t *bitmap;
uint64_t nblocks, size;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
spa_config_enter(brt->brt_spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(brt->brt_spa, brtvd->bv_vdevid);
size = (vdev_get_min_asize(vd) - 1) / brt->brt_rangesize + 1;
spa_config_exit(brt->brt_spa, SCL_VDEV, FTAG);
entcount = vmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
nblocks = BRT_RANGESIZE_TO_NBLOCKS(size);
bitmap = kmem_zalloc(BT_SIZEOFMAP(nblocks), KM_SLEEP);
if (!brtvd->bv_initiated) {
ASSERT0(brtvd->bv_size);
ASSERT(brtvd->bv_entcount == NULL);
ASSERT(brtvd->bv_bitmap == NULL);
ASSERT0(brtvd->bv_nblocks);
avl_create(&brtvd->bv_tree, brt_entry_compare,
sizeof (brt_entry_t), offsetof(brt_entry_t, bre_node));
} else {
ASSERT(brtvd->bv_size > 0);
ASSERT(brtvd->bv_entcount != NULL);
ASSERT(brtvd->bv_bitmap != NULL);
ASSERT(brtvd->bv_nblocks > 0);
/*
* TODO: Allow vdev shrinking. We only need to implement
* shrinking the on-disk BRT VDEV object.
* dmu_free_range(brt->brt_mos, brtvd->bv_mos_brtvdev, offset,
* size, tx);
*/
ASSERT3U(brtvd->bv_size, <=, size);
memcpy(entcount, brtvd->bv_entcount,
sizeof (entcount[0]) * MIN(size, brtvd->bv_size));
memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks),
BT_SIZEOFMAP(brtvd->bv_nblocks)));
vmem_free(brtvd->bv_entcount,
sizeof (entcount[0]) * brtvd->bv_size);
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
}
brtvd->bv_size = size;
brtvd->bv_entcount = entcount;
brtvd->bv_bitmap = bitmap;
brtvd->bv_nblocks = nblocks;
if (!brtvd->bv_initiated) {
brtvd->bv_need_byteswap = FALSE;
brtvd->bv_initiated = TRUE;
BRT_DEBUG("BRT VDEV %llu initiated.",
(u_longlong_t)brtvd->bv_vdevid);
}
}
static void
brt_vdev_load(brt_t *brt, brt_vdev_t *brtvd)
{
char name[64];
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
int error;
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
error = zap_lookup(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev);
if (error != 0)
return;
ASSERT(brtvd->bv_mos_brtvdev != 0);
error = dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db);
ASSERT0(error);
if (error != 0)
return;
bvphys = db->db_data;
if (brt->brt_rangesize == 0) {
brt->brt_rangesize = bvphys->bvp_rangesize;
} else {
ASSERT3U(brt->brt_rangesize, ==, bvphys->bvp_rangesize);
}
ASSERT(!brtvd->bv_initiated);
brt_vdev_realloc(brt, brtvd);
/* TODO: We don't support VDEV shrinking. */
ASSERT3U(bvphys->bvp_size, <=, brtvd->bv_size);
/*
* If VDEV grew, we will leave new bv_entcount[] entries zeroed out.
*/
error = dmu_read(brt->brt_mos, brtvd->bv_mos_brtvdev, 0,
MIN(brtvd->bv_size, bvphys->bvp_size) * sizeof (uint16_t),
brtvd->bv_entcount, DMU_READ_NO_PREFETCH);
ASSERT0(error);
brtvd->bv_mos_entries = bvphys->bvp_mos_entries;
ASSERT(brtvd->bv_mos_entries != 0);
brtvd->bv_need_byteswap =
(bvphys->bvp_byteorder != BRT_NATIVE_BYTEORDER);
brtvd->bv_totalcount = bvphys->bvp_totalcount;
brtvd->bv_usedspace = bvphys->bvp_usedspace;
brtvd->bv_savedspace = bvphys->bvp_savedspace;
brt->brt_usedspace += brtvd->bv_usedspace;
brt->brt_savedspace += brtvd->bv_savedspace;
dmu_buf_rele(db, FTAG);
BRT_DEBUG("MOS BRT VDEV %s loaded: mos_brtvdev=%llu, mos_entries=%llu",
name, (u_longlong_t)brtvd->bv_mos_brtvdev,
(u_longlong_t)brtvd->bv_mos_entries);
}
static void
brt_vdev_dealloc(brt_t *brt, brt_vdev_t *brtvd)
{
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_initiated);
vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
brtvd->bv_entcount = NULL;
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
brtvd->bv_bitmap = NULL;
ASSERT0(avl_numnodes(&brtvd->bv_tree));
avl_destroy(&brtvd->bv_tree);
brtvd->bv_size = 0;
brtvd->bv_nblocks = 0;
brtvd->bv_initiated = FALSE;
BRT_DEBUG("BRT VDEV %llu deallocated.", (u_longlong_t)brtvd->bv_vdevid);
}
static void
brt_vdev_destroy(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
char name[64];
uint64_t count;
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_brtvdev != 0);
ASSERT(brtvd->bv_mos_entries != 0);
VERIFY0(zap_count(brt->brt_mos, brtvd->bv_mos_entries, &count));
VERIFY0(count);
VERIFY0(zap_destroy(brt->brt_mos, brtvd->bv_mos_entries, tx));
BRT_DEBUG("MOS entries destroyed, object=%llu",
(u_longlong_t)brtvd->bv_mos_entries);
brtvd->bv_mos_entries = 0;
VERIFY0(dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db));
bvphys = db->db_data;
ASSERT0(bvphys->bvp_totalcount);
ASSERT0(bvphys->bvp_usedspace);
ASSERT0(bvphys->bvp_savedspace);
dmu_buf_rele(db, FTAG);
VERIFY0(dmu_object_free(brt->brt_mos, brtvd->bv_mos_brtvdev, tx));
BRT_DEBUG("MOS BRT VDEV destroyed, object=%llu",
(u_longlong_t)brtvd->bv_mos_brtvdev);
brtvd->bv_mos_brtvdev = 0;
snprintf(name, sizeof (name), "%s%llu", BRT_OBJECT_VDEV_PREFIX,
(u_longlong_t)brtvd->bv_vdevid);
VERIFY0(zap_remove(brt->brt_mos, DMU_POOL_DIRECTORY_OBJECT, name, tx));
BRT_DEBUG("Pool directory object removed, object=%s", name);
brt_vdev_dealloc(brt, brtvd);
spa_feature_decr(brt->brt_spa, SPA_FEATURE_BLOCK_CLONING, tx);
}
static void
brt_vdevs_expand(brt_t *brt, uint64_t nvdevs)
{
brt_vdev_t *brtvd, *vdevs;
uint64_t vdevid;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT3U(nvdevs, >, brt->brt_nvdevs);
vdevs = kmem_zalloc(sizeof (vdevs[0]) * nvdevs, KM_SLEEP);
if (brt->brt_nvdevs > 0) {
ASSERT(brt->brt_vdevs != NULL);
memcpy(vdevs, brt->brt_vdevs,
sizeof (brt_vdev_t) * brt->brt_nvdevs);
kmem_free(brt->brt_vdevs,
sizeof (brt_vdev_t) * brt->brt_nvdevs);
}
for (vdevid = brt->brt_nvdevs; vdevid < nvdevs; vdevid++) {
brtvd = &vdevs[vdevid];
brtvd->bv_vdevid = vdevid;
brtvd->bv_initiated = FALSE;
}
BRT_DEBUG("BRT VDEVs expanded from %llu to %llu.",
(u_longlong_t)brt->brt_nvdevs, (u_longlong_t)nvdevs);
brt->brt_vdevs = vdevs;
brt->brt_nvdevs = nvdevs;
}
static boolean_t
brt_vdev_lookup(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre)
{
uint64_t idx;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
idx = bre->bre_offset / brt->brt_rangesize;
if (brtvd->bv_entcount != NULL && idx < brtvd->bv_size) {
/* VDEV wasn't expanded. */
return (brt_vdev_entcount_get(brtvd, idx) > 0);
}
return (FALSE);
}
static void
brt_vdev_addref(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre,
uint64_t dsize)
{
uint64_t idx;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd != NULL);
ASSERT(brtvd->bv_entcount != NULL);
brt->brt_savedspace += dsize;
brtvd->bv_savedspace += dsize;
brtvd->bv_meta_dirty = TRUE;
if (bre->bre_refcount > 1) {
return;
}
brt->brt_usedspace += dsize;
brtvd->bv_usedspace += dsize;
idx = bre->bre_offset / brt->brt_rangesize;
if (idx >= brtvd->bv_size) {
/* VDEV has been expanded. */
brt_vdev_realloc(brt, brtvd);
}
ASSERT3U(idx, <, brtvd->bv_size);
brtvd->bv_totalcount++;
brt_vdev_entcount_inc(brtvd, idx);
brtvd->bv_entcount_dirty = TRUE;
idx = idx / BRT_BLOCKSIZE / 8;
BT_SET(brtvd->bv_bitmap, idx);
#ifdef ZFS_DEBUG
- brt_vdev_dump(brt);
+ if (zfs_flags & ZFS_DEBUG_BRT)
+ brt_vdev_dump(brtvd);
#endif
}
static void
brt_vdev_decref(brt_t *brt, brt_vdev_t *brtvd, const brt_entry_t *bre,
uint64_t dsize)
{
uint64_t idx;
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd != NULL);
ASSERT(brtvd->bv_entcount != NULL);
brt->brt_savedspace -= dsize;
brtvd->bv_savedspace -= dsize;
brtvd->bv_meta_dirty = TRUE;
if (bre->bre_refcount > 0) {
return;
}
brt->brt_usedspace -= dsize;
brtvd->bv_usedspace -= dsize;
idx = bre->bre_offset / brt->brt_rangesize;
ASSERT3U(idx, <, brtvd->bv_size);
ASSERT(brtvd->bv_totalcount > 0);
brtvd->bv_totalcount--;
brt_vdev_entcount_dec(brtvd, idx);
brtvd->bv_entcount_dirty = TRUE;
idx = idx / BRT_BLOCKSIZE / 8;
BT_SET(brtvd->bv_bitmap, idx);
#ifdef ZFS_DEBUG
- brt_vdev_dump(brt);
+ if (zfs_flags & ZFS_DEBUG_BRT)
+ brt_vdev_dump(brtvd);
#endif
}
static void
brt_vdev_sync(brt_t *brt, brt_vdev_t *brtvd, dmu_tx_t *tx)
{
dmu_buf_t *db;
brt_vdev_phys_t *bvphys;
ASSERT(brtvd->bv_meta_dirty);
ASSERT(brtvd->bv_mos_brtvdev != 0);
ASSERT(dmu_tx_is_syncing(tx));
VERIFY0(dmu_bonus_hold(brt->brt_mos, brtvd->bv_mos_brtvdev, FTAG, &db));
if (brtvd->bv_entcount_dirty) {
/*
* TODO: Walk brtvd->bv_bitmap and write only the dirty blocks.
*/
dmu_write(brt->brt_mos, brtvd->bv_mos_brtvdev, 0,
brtvd->bv_size * sizeof (brtvd->bv_entcount[0]),
brtvd->bv_entcount, tx);
memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(brtvd->bv_nblocks));
brtvd->bv_entcount_dirty = FALSE;
}
dmu_buf_will_dirty(db, tx);
bvphys = db->db_data;
bvphys->bvp_mos_entries = brtvd->bv_mos_entries;
bvphys->bvp_size = brtvd->bv_size;
if (brtvd->bv_need_byteswap) {
bvphys->bvp_byteorder = BRT_NON_NATIVE_BYTEORDER;
} else {
bvphys->bvp_byteorder = BRT_NATIVE_BYTEORDER;
}
bvphys->bvp_totalcount = brtvd->bv_totalcount;
bvphys->bvp_rangesize = brt->brt_rangesize;
bvphys->bvp_usedspace = brtvd->bv_usedspace;
bvphys->bvp_savedspace = brtvd->bv_savedspace;
dmu_buf_rele(db, FTAG);
brtvd->bv_meta_dirty = FALSE;
}
static void
brt_vdevs_alloc(brt_t *brt, boolean_t load)
{
brt_vdev_t *brtvd;
uint64_t vdevid;
brt_wlock(brt);
brt_vdevs_expand(brt, brt->brt_spa->spa_root_vdev->vdev_children);
if (load) {
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
ASSERT(brtvd->bv_entcount == NULL);
brt_vdev_load(brt, brtvd);
}
}
if (brt->brt_rangesize == 0) {
brt->brt_rangesize = BRT_RANGESIZE;
}
brt_unlock(brt);
}
static void
brt_vdevs_free(brt_t *brt)
{
brt_vdev_t *brtvd;
uint64_t vdevid;
brt_wlock(brt);
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
if (brtvd->bv_initiated)
brt_vdev_dealloc(brt, brtvd);
}
kmem_free(brt->brt_vdevs, sizeof (brt_vdev_t) * brt->brt_nvdevs);
brt_unlock(brt);
}
static void
brt_entry_fill(const blkptr_t *bp, brt_entry_t *bre, uint64_t *vdevidp)
{
bre->bre_offset = DVA_GET_OFFSET(&bp->blk_dva[0]);
bre->bre_refcount = 0;
*vdevidp = DVA_GET_VDEV(&bp->blk_dva[0]);
}
static int
brt_entry_compare(const void *x1, const void *x2)
{
const brt_entry_t *bre1 = x1;
const brt_entry_t *bre2 = x2;
return (TREE_CMP(bre1->bre_offset, bre2->bre_offset));
}
static int
brt_entry_lookup(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre)
{
uint64_t mos_entries;
uint64_t one, physsize;
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
if (!brt_vdev_lookup(brt, brtvd, bre))
return (SET_ERROR(ENOENT));
/*
* Remember the mos_entries object number. After we reacquire the BRT lock,
* the brtvd pointer may be invalid.
*/
mos_entries = brtvd->bv_mos_entries;
if (mos_entries == 0)
return (SET_ERROR(ENOENT));
brt_unlock(brt);
error = zap_length_uint64(brt->brt_mos, mos_entries, &bre->bre_offset,
BRT_KEY_WORDS, &one, &physsize);
if (error == 0) {
ASSERT3U(one, ==, 1);
ASSERT3U(physsize, ==, sizeof (bre->bre_refcount));
error = zap_lookup_uint64(brt->brt_mos, mos_entries,
&bre->bre_offset, BRT_KEY_WORDS, 1,
sizeof (bre->bre_refcount), &bre->bre_refcount);
BRT_DEBUG("ZAP lookup: object=%llu vdev=%llu offset=%llu "
"count=%llu error=%d", (u_longlong_t)mos_entries,
(u_longlong_t)brtvd->bv_vdevid,
(u_longlong_t)bre->bre_offset,
error == 0 ? (u_longlong_t)bre->bre_refcount : 0, error);
}
brt_wlock(brt);
return (error);
}
static void
brt_entry_prefetch(brt_t *brt, uint64_t vdevid, brt_entry_t *bre)
{
brt_vdev_t *brtvd;
uint64_t mos_entries = 0;
brt_rlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd != NULL)
mos_entries = brtvd->bv_mos_entries;
brt_unlock(brt);
if (mos_entries == 0)
return;
BRT_DEBUG("ZAP prefetch: object=%llu vdev=%llu offset=%llu",
(u_longlong_t)mos_entries, (u_longlong_t)vdevid,
(u_longlong_t)bre->bre_offset);
(void) zap_prefetch_uint64(brt->brt_mos, mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS);
}
static int
brt_entry_update(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
ASSERT(bre->bre_refcount > 0);
error = zap_update_uint64(brt->brt_mos, brtvd->bv_mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS, 1,
sizeof (bre->bre_refcount), &bre->bre_refcount, tx);
BRT_DEBUG("ZAP update: object=%llu vdev=%llu offset=%llu count=%llu "
"error=%d", (u_longlong_t)brtvd->bv_mos_entries,
(u_longlong_t)brtvd->bv_vdevid, (u_longlong_t)bre->bre_offset,
(u_longlong_t)bre->bre_refcount, error);
return (error);
}
static int
brt_entry_remove(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
int error;
ASSERT(RW_LOCK_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
ASSERT0(bre->bre_refcount);
error = zap_remove_uint64(brt->brt_mos, brtvd->bv_mos_entries,
(uint64_t *)&bre->bre_offset, BRT_KEY_WORDS, tx);
BRT_DEBUG("ZAP remove: object=%llu vdev=%llu offset=%llu count=%llu "
"error=%d", (u_longlong_t)brtvd->bv_mos_entries,
(u_longlong_t)brtvd->bv_vdevid, (u_longlong_t)bre->bre_offset,
(u_longlong_t)bre->bre_refcount, error);
return (error);
}
/*
* Return TRUE if we _can_ have a BRT entry for this bp. It might be a
* false positive, but it gives us a quick answer whether we should look
* into the BRT, which may require reads and thus be more expensive.
*/
boolean_t
brt_maybe_exists(spa_t *spa, const blkptr_t *bp)
{
brt_t *brt = spa->spa_brt;
brt_vdev_t *brtvd;
brt_entry_t bre_search;
boolean_t mayexists = FALSE;
uint64_t vdevid;
brt_entry_fill(bp, &bre_search, &vdevid);
brt_rlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd != NULL && brtvd->bv_initiated) {
if (!avl_is_empty(&brtvd->bv_tree) ||
brt_vdev_lookup(brt, brtvd, &bre_search)) {
mayexists = TRUE;
}
}
brt_unlock(brt);
return (mayexists);
}
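/*
 * A minimal caller sketch (hypothetical): a free path can use this
 * cheap test to skip the BRT entirely for blocks that were never
 * cloned, consulting brt_entry_decref() only when it returns TRUE
 * (free_now is a stand-in variable for illustration):
 *
 *	if (!brt_maybe_exists(spa, bp) || brt_entry_decref(spa, bp))
 *		free_now = B_TRUE;
 */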
uint64_t
brt_get_dspace(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_savedspace);
}
uint64_t
brt_get_used(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_usedspace);
}
uint64_t
brt_get_saved(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return (0);
return (brt->brt_savedspace);
}
uint64_t
brt_get_ratio(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt->brt_usedspace == 0)
return (100);
return ((brt->brt_usedspace + brt->brt_savedspace) * 100 /
brt->brt_usedspace);
}
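/*
 * Worked example: with brt_usedspace = 1 GiB of still-referenced
 * cloned data and brt_savedspace = 3 GiB saved by cloning, the result
 * is (1 + 3) * 100 / 1 = 400, a fixed-point ratio that user space
 * would render as 4.00x.
 */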
static int
brt_kstats_update(kstat_t *ksp, int rw)
{
brt_stats_t *bs = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
bs->brt_addref_entry_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_in_memory);
bs->brt_addref_entry_not_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_not_on_disk);
bs->brt_addref_entry_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_on_disk);
bs->brt_addref_entry_read_lost_race.value.ui64 =
wmsum_value(&brt_sums.brt_addref_entry_read_lost_race);
bs->brt_decref_entry_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_in_memory);
bs->brt_decref_entry_loaded_from_disk.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_loaded_from_disk);
bs->brt_decref_entry_not_in_memory.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_not_in_memory);
bs->brt_decref_entry_not_on_disk.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_not_on_disk);
bs->brt_decref_entry_read_lost_race.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_read_lost_race);
bs->brt_decref_entry_still_referenced.value.ui64 =
wmsum_value(&brt_sums.brt_decref_entry_still_referenced);
bs->brt_decref_free_data_later.value.ui64 =
wmsum_value(&brt_sums.brt_decref_free_data_later);
bs->brt_decref_free_data_now.value.ui64 =
wmsum_value(&brt_sums.brt_decref_free_data_now);
bs->brt_decref_no_entry.value.ui64 =
wmsum_value(&brt_sums.brt_decref_no_entry);
return (0);
}
static void
brt_stat_init(void)
{
wmsum_init(&brt_sums.brt_addref_entry_in_memory, 0);
wmsum_init(&brt_sums.brt_addref_entry_not_on_disk, 0);
wmsum_init(&brt_sums.brt_addref_entry_on_disk, 0);
wmsum_init(&brt_sums.brt_addref_entry_read_lost_race, 0);
wmsum_init(&brt_sums.brt_decref_entry_in_memory, 0);
wmsum_init(&brt_sums.brt_decref_entry_loaded_from_disk, 0);
wmsum_init(&brt_sums.brt_decref_entry_not_in_memory, 0);
wmsum_init(&brt_sums.brt_decref_entry_not_on_disk, 0);
wmsum_init(&brt_sums.brt_decref_entry_read_lost_race, 0);
wmsum_init(&brt_sums.brt_decref_entry_still_referenced, 0);
wmsum_init(&brt_sums.brt_decref_free_data_later, 0);
wmsum_init(&brt_sums.brt_decref_free_data_now, 0);
wmsum_init(&brt_sums.brt_decref_no_entry, 0);
brt_ksp = kstat_create("zfs", 0, "brtstats", "misc", KSTAT_TYPE_NAMED,
sizeof (brt_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (brt_ksp != NULL) {
brt_ksp->ks_data = &brt_stats;
brt_ksp->ks_update = brt_kstats_update;
kstat_install(brt_ksp);
}
}
static void
brt_stat_fini(void)
{
if (brt_ksp != NULL) {
kstat_delete(brt_ksp);
brt_ksp = NULL;
}
wmsum_fini(&brt_sums.brt_addref_entry_in_memory);
wmsum_fini(&brt_sums.brt_addref_entry_not_on_disk);
wmsum_fini(&brt_sums.brt_addref_entry_on_disk);
wmsum_fini(&brt_sums.brt_addref_entry_read_lost_race);
wmsum_fini(&brt_sums.brt_decref_entry_in_memory);
wmsum_fini(&brt_sums.brt_decref_entry_loaded_from_disk);
wmsum_fini(&brt_sums.brt_decref_entry_not_in_memory);
wmsum_fini(&brt_sums.brt_decref_entry_not_on_disk);
wmsum_fini(&brt_sums.brt_decref_entry_read_lost_race);
wmsum_fini(&brt_sums.brt_decref_entry_still_referenced);
wmsum_fini(&brt_sums.brt_decref_free_data_later);
wmsum_fini(&brt_sums.brt_decref_free_data_now);
wmsum_fini(&brt_sums.brt_decref_no_entry);
}
void
brt_init(void)
{
brt_entry_cache = kmem_cache_create("brt_entry_cache",
sizeof (brt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
brt_pending_entry_cache = kmem_cache_create("brt_pending_entry_cache",
sizeof (brt_pending_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
brt_stat_init();
}
void
brt_fini(void)
{
brt_stat_fini();
kmem_cache_destroy(brt_entry_cache);
kmem_cache_destroy(brt_pending_entry_cache);
}
static brt_entry_t *
brt_entry_alloc(const brt_entry_t *bre_init)
{
brt_entry_t *bre;
bre = kmem_cache_alloc(brt_entry_cache, KM_SLEEP);
bre->bre_offset = bre_init->bre_offset;
bre->bre_refcount = bre_init->bre_refcount;
return (bre);
}
static void
brt_entry_free(brt_entry_t *bre)
{
kmem_cache_free(brt_entry_cache, bre);
}
static void
brt_entry_addref(brt_t *brt, const blkptr_t *bp)
{
brt_vdev_t *brtvd;
brt_entry_t *bre, *racebre;
brt_entry_t bre_search;
avl_index_t where;
uint64_t vdevid;
int error;
ASSERT(!RW_WRITE_HELD(&brt->brt_lock));
brt_entry_fill(bp, &bre_search, &vdevid);
brt_wlock(brt);
brtvd = brt_vdev(brt, vdevid);
if (brtvd == NULL) {
ASSERT3U(vdevid, >=, brt->brt_nvdevs);
/* New VDEV was added. */
brt_vdevs_expand(brt, vdevid + 1);
brtvd = brt_vdev(brt, vdevid);
}
ASSERT(brtvd != NULL);
if (!brtvd->bv_initiated)
brt_vdev_realloc(brt, brtvd);
bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
if (bre != NULL) {
BRTSTAT_BUMP(brt_addref_entry_in_memory);
} else {
/*
* brt_entry_lookup() may drop the BRT (read) lock and
* reacquire it (write).
*/
error = brt_entry_lookup(brt, brtvd, &bre_search);
/* bre_search now contains correct bre_refcount */
ASSERT(error == 0 || error == ENOENT);
if (error == 0)
BRTSTAT_BUMP(brt_addref_entry_on_disk);
else
BRTSTAT_BUMP(brt_addref_entry_not_on_disk);
/*
* When the BRT lock was dropped, brt_vdevs[] may have been
* expanded and reallocated, so we need to update the brtvd pointer.
*/
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
if (racebre == NULL) {
bre = brt_entry_alloc(&bre_search);
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
avl_insert(&brtvd->bv_tree, bre, where);
brt->brt_nentries++;
} else {
/*
* The entry was added when the BRT lock was dropped in
* brt_entry_lookup().
*/
BRTSTAT_BUMP(brt_addref_entry_read_lost_race);
bre = racebre;
}
}
bre->bre_refcount++;
brt_vdev_addref(brt, brtvd, bre, bp_get_dsize(brt->brt_spa, bp));
brt_unlock(brt);
}
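/*
 * Note on the locking pattern above: brt_entry_lookup() drops the BRT
 * lock for the ZAP read, so the entry may appear in bv_tree in the
 * meantime. Re-running avl_find() after the lock is reacquired, and
 * reusing its `where` cookie for avl_insert(), resolves the race:
 * either we adopt the entry that won the race or we insert ours at
 * the position the second search computed.
 */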
/* Return TRUE if block should be freed immediately. */
boolean_t
brt_entry_decref(spa_t *spa, const blkptr_t *bp)
{
brt_t *brt = spa->spa_brt;
brt_vdev_t *brtvd;
brt_entry_t *bre, *racebre;
brt_entry_t bre_search;
avl_index_t where;
uint64_t vdevid;
int error;
brt_entry_fill(bp, &bre_search, &vdevid);
brt_wlock(brt);
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
if (bre != NULL) {
BRTSTAT_BUMP(brt_decref_entry_in_memory);
goto out;
} else {
BRTSTAT_BUMP(brt_decref_entry_not_in_memory);
}
/*
* brt_entry_lookup() may drop the BRT lock and reacquire it.
*/
error = brt_entry_lookup(brt, brtvd, &bre_search);
/* bre_search now contains correct bre_refcount */
ASSERT(error == 0 || error == ENOENT);
/*
* When the BRT lock was dropped, brt_vdevs[] may have been expanded
* and reallocated, so we need to update the brtvd pointer.
*/
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
if (error == ENOENT) {
BRTSTAT_BUMP(brt_decref_entry_not_on_disk);
bre = NULL;
goto out;
}
racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
if (racebre != NULL) {
/*
* The entry was added when the BRT lock was dropped in
* brt_entry_lookup().
*/
BRTSTAT_BUMP(brt_decref_entry_read_lost_race);
bre = racebre;
goto out;
}
BRTSTAT_BUMP(brt_decref_entry_loaded_from_disk);
bre = brt_entry_alloc(&bre_search);
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
avl_insert(&brtvd->bv_tree, bre, where);
brt->brt_nentries++;
out:
if (bre == NULL) {
/*
* This is a free of a regular (not cloned) block.
*/
brt_unlock(brt);
BRTSTAT_BUMP(brt_decref_no_entry);
return (B_TRUE);
}
if (bre->bre_refcount == 0) {
brt_unlock(brt);
BRTSTAT_BUMP(brt_decref_free_data_now);
return (B_TRUE);
}
ASSERT(bre->bre_refcount > 0);
bre->bre_refcount--;
if (bre->bre_refcount == 0)
BRTSTAT_BUMP(brt_decref_free_data_later);
else
BRTSTAT_BUMP(brt_decref_entry_still_referenced);
brt_vdev_decref(brt, brtvd, bre, bp_get_dsize(brt->brt_spa, bp));
brt_unlock(brt);
return (B_FALSE);
}
uint64_t
brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp)
{
brt_t *brt = spa->spa_brt;
brt_vdev_t *brtvd;
brt_entry_t bre_search, *bre;
uint64_t vdevid, refcnt;
int error;
brt_entry_fill(bp, &bre_search, &vdevid);
brt_rlock(brt);
brtvd = brt_vdev(brt, vdevid);
ASSERT(brtvd != NULL);
bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
if (bre == NULL) {
error = brt_entry_lookup(brt, brtvd, &bre_search);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT)
refcnt = 0;
else
refcnt = bre_search.bre_refcount;
} else
refcnt = bre->bre_refcount;
brt_unlock(brt);
return (refcnt);
}
static void
brt_prefetch(brt_t *brt, const blkptr_t *bp)
{
brt_entry_t bre;
uint64_t vdevid;
ASSERT(bp != NULL);
if (!zfs_brt_prefetch)
return;
brt_entry_fill(bp, &bre, &vdevid);
brt_entry_prefetch(brt, vdevid, &bre);
}
static int
brt_pending_entry_compare(const void *x1, const void *x2)
{
const brt_pending_entry_t *bpe1 = x1, *bpe2 = x2;
const blkptr_t *bp1 = &bpe1->bpe_bp, *bp2 = &bpe2->bpe_bp;
int cmp;
cmp = TREE_CMP(BP_PHYSICAL_BIRTH(bp1), BP_PHYSICAL_BIRTH(bp2));
if (cmp == 0) {
cmp = TREE_CMP(DVA_GET_VDEV(&bp1->blk_dva[0]),
DVA_GET_VDEV(&bp2->blk_dva[0]));
if (cmp == 0) {
cmp = TREE_CMP(DVA_GET_OFFSET(&bp1->blk_dva[0]),
DVA_GET_OFFSET(&bp2->blk_dva[0]));
}
}
return (cmp);
}
void
brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
brt_t *brt;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
brt_pending_entry_t *bpe, *newbpe;
avl_index_t where;
uint64_t txg;
brt = spa->spa_brt;
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
newbpe = kmem_cache_alloc(brt_pending_entry_cache, KM_SLEEP);
newbpe->bpe_bp = *bp;
newbpe->bpe_count = 1;
mutex_enter(pending_lock);
bpe = avl_find(pending_tree, newbpe, &where);
if (bpe == NULL) {
avl_insert(pending_tree, newbpe, where);
newbpe = NULL;
} else {
bpe->bpe_count++;
}
mutex_exit(pending_lock);
if (newbpe != NULL) {
ASSERT(bpe != NULL);
ASSERT(bpe != newbpe);
kmem_cache_free(brt_pending_entry_cache, newbpe);
} else {
ASSERT(bpe == NULL);
}
/* Prefetch BRT entry, as we will need it in the syncing context. */
brt_prefetch(brt, bp);
}
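/*
 * Illustrative arithmetic: the pending trees form a TXG_SIZE-entry
 * ring indexed by (txg & TXG_MASK). With TXG_SIZE = 4, a clone
 * created in txg 1234 lands in slot 1234 & 3 = 2 and is applied by
 * brt_pending_apply(spa, 1234) in syncing context.
 */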
void
brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
{
brt_t *brt;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
brt_pending_entry_t *bpe, bpe_search;
uint64_t txg;
brt = spa->spa_brt;
txg = dmu_tx_get_txg(tx);
ASSERT3U(txg, !=, 0);
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
bpe_search.bpe_bp = *bp;
mutex_enter(pending_lock);
bpe = avl_find(pending_tree, &bpe_search, NULL);
/* We expect to always find bpe when this function is called. */
if (bpe != NULL) {
ASSERT(bpe->bpe_count > 0);
bpe->bpe_count--;
if (bpe->bpe_count == 0) {
avl_remove(pending_tree, bpe);
kmem_cache_free(brt_pending_entry_cache, bpe);
}
}
mutex_exit(pending_lock);
}
void
brt_pending_apply(spa_t *spa, uint64_t txg)
{
brt_t *brt;
brt_pending_entry_t *bpe;
avl_tree_t *pending_tree;
kmutex_t *pending_lock;
void *c;
ASSERT3U(txg, !=, 0);
brt = spa->spa_brt;
pending_tree = &brt->brt_pending_tree[txg & TXG_MASK];
pending_lock = &brt->brt_pending_lock[txg & TXG_MASK];
mutex_enter(pending_lock);
c = NULL;
while ((bpe = avl_destroy_nodes(pending_tree, &c)) != NULL) {
boolean_t added_to_ddt;
mutex_exit(pending_lock);
for (int i = 0; i < bpe->bpe_count; i++) {
/*
* If the block has the DEDUP bit set, it already exists in
* the dedup table, so we can just bump its reference count
* there instead of creating a new entry in the BRT table.
*/
if (BP_GET_DEDUP(&bpe->bpe_bp)) {
added_to_ddt = ddt_addref(spa, &bpe->bpe_bp);
} else {
added_to_ddt = B_FALSE;
}
if (!added_to_ddt)
brt_entry_addref(brt, &bpe->bpe_bp);
}
kmem_cache_free(brt_pending_entry_cache, bpe);
mutex_enter(pending_lock);
}
mutex_exit(pending_lock);
}
static void
brt_sync_entry(brt_t *brt, brt_vdev_t *brtvd, brt_entry_t *bre, dmu_tx_t *tx)
{
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_mos_entries != 0);
if (bre->bre_refcount == 0) {
int error;
error = brt_entry_remove(brt, brtvd, bre, tx);
ASSERT(error == 0 || error == ENOENT);
/*
* If error == ENOENT, then zfs_clone_range() was done from a
* removed (but still open) file (open() followed by unlink()).
*/
ASSERT(brt_entry_lookup(brt, brtvd, bre) == ENOENT);
} else {
VERIFY0(brt_entry_update(brt, brtvd, bre, tx));
}
}
static void
brt_sync_table(brt_t *brt, dmu_tx_t *tx)
{
brt_vdev_t *brtvd;
brt_entry_t *bre;
uint64_t vdevid;
void *c;
brt_wlock(brt);
for (vdevid = 0; vdevid < brt->brt_nvdevs; vdevid++) {
brtvd = &brt->brt_vdevs[vdevid];
if (!brtvd->bv_initiated)
continue;
if (!brtvd->bv_meta_dirty) {
ASSERT(!brtvd->bv_entcount_dirty);
ASSERT0(avl_numnodes(&brtvd->bv_tree));
continue;
}
ASSERT(!brtvd->bv_entcount_dirty ||
avl_numnodes(&brtvd->bv_tree) != 0);
if (brtvd->bv_mos_brtvdev == 0)
brt_vdev_create(brt, brtvd, tx);
c = NULL;
while ((bre = avl_destroy_nodes(&brtvd->bv_tree, &c)) != NULL) {
brt_sync_entry(brt, brtvd, bre, tx);
brt_entry_free(bre);
ASSERT(brt->brt_nentries > 0);
brt->brt_nentries--;
}
brt_vdev_sync(brt, brtvd, tx);
if (brtvd->bv_totalcount == 0)
brt_vdev_destroy(brt, brtvd, tx);
}
ASSERT0(brt->brt_nentries);
brt_unlock(brt);
}
void
brt_sync(spa_t *spa, uint64_t txg)
{
dmu_tx_t *tx;
brt_t *brt;
ASSERT(spa_syncing_txg(spa) == txg);
brt = spa->spa_brt;
brt_rlock(brt);
if (brt->brt_nentries == 0) {
/* No changes. */
brt_unlock(brt);
return;
}
brt_unlock(brt);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
brt_sync_table(brt, tx);
dmu_tx_commit(tx);
}
static void
brt_table_alloc(brt_t *brt)
{
for (int i = 0; i < TXG_SIZE; i++) {
avl_create(&brt->brt_pending_tree[i],
brt_pending_entry_compare,
sizeof (brt_pending_entry_t),
offsetof(brt_pending_entry_t, bpe_node));
mutex_init(&brt->brt_pending_lock[i], NULL, MUTEX_DEFAULT,
NULL);
}
}
static void
brt_table_free(brt_t *brt)
{
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(avl_is_empty(&brt->brt_pending_tree[i]));
avl_destroy(&brt->brt_pending_tree[i]);
mutex_destroy(&brt->brt_pending_lock[i]);
}
}
static void
brt_alloc(spa_t *spa)
{
brt_t *brt;
ASSERT(spa->spa_brt == NULL);
brt = kmem_zalloc(sizeof (*brt), KM_SLEEP);
rw_init(&brt->brt_lock, NULL, RW_DEFAULT, NULL);
brt->brt_spa = spa;
brt->brt_rangesize = 0;
brt->brt_nentries = 0;
brt->brt_vdevs = NULL;
brt->brt_nvdevs = 0;
brt_table_alloc(brt);
spa->spa_brt = brt;
}
void
brt_create(spa_t *spa)
{
brt_alloc(spa);
brt_vdevs_alloc(spa->spa_brt, B_FALSE);
}
int
brt_load(spa_t *spa)
{
brt_alloc(spa);
brt_vdevs_alloc(spa->spa_brt, B_TRUE);
return (0);
}
void
brt_unload(spa_t *spa)
{
brt_t *brt = spa->spa_brt;
if (brt == NULL)
return;
brt_vdevs_free(brt);
brt_table_free(brt);
rw_destroy(&brt->brt_lock);
kmem_free(brt, sizeof (*brt));
spa->spa_brt = NULL;
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_brt, zfs_brt_, prefetch, INT, ZMOD_RW,
"Enable prefetching of BRT entries");
#ifdef ZFS_BRT_DEBUG
ZFS_MODULE_PARAM(zfs_brt, zfs_brt_, debug, INT, ZMOD_RW, "BRT debug");
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dataset_kstats.c b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
index 767a461e0026..2ac058fd2c93 100644
--- a/sys/contrib/openzfs/module/zfs/dataset_kstats.c
+++ b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
@@ -1,243 +1,255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 by Delphix. All rights reserved.
* Copyright (c) 2018 Datto Inc.
*/
#include <sys/dataset_kstats.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
static dataset_kstat_values_t empty_dataset_kstats = {
{ "dataset_name", KSTAT_DATA_STRING },
{ "writes", KSTAT_DATA_UINT64 },
{ "nwritten", KSTAT_DATA_UINT64 },
{ "reads", KSTAT_DATA_UINT64 },
{ "nread", KSTAT_DATA_UINT64 },
{ "nunlinks", KSTAT_DATA_UINT64 },
{ "nunlinked", KSTAT_DATA_UINT64 },
{
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 }
}
};
static int
dataset_kstats_update(kstat_t *ksp, int rw)
{
dataset_kstats_t *dk = ksp->ks_private;
dataset_kstat_values_t *dkv = ksp->ks_data;
ASSERT3P(dk->dk_kstats->ks_data, ==, dkv);
if (rw == KSTAT_WRITE)
return (EACCES);
dkv->dkv_writes.value.ui64 =
wmsum_value(&dk->dk_sums.dss_writes);
dkv->dkv_nwritten.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nwritten);
dkv->dkv_reads.value.ui64 =
wmsum_value(&dk->dk_sums.dss_reads);
dkv->dkv_nread.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nread);
dkv->dkv_nunlinks.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nunlinks);
dkv->dkv_nunlinked.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nunlinked);
zil_kstat_values_update(&dkv->dkv_zil_stats, &dk->dk_zil_sums);
return (0);
}
int
dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
{
/*
* There should not be anything wrong with having kstats for
* snapshots. However, since we are not sure how useful they would
* be, nor how much their memory overhead would matter in a
* filesystem with many snapshots, we skip them for now.
*/
if (dmu_objset_is_snapshot(objset))
return (0);
/*
* At the time of this writing, KSTAT_STRLEN is 255 in Linux,
* and the spa_name can theoretically be up to 256 characters.
* In reality though the spa_name can be 240 characters max
* [see origin directory name check in pool_namecheck()]. Thus,
* the naming scheme for the module name below should not cause
* any truncations. In the event that a truncation does happen
* though, due to some future change, we skip creating
* the kstat and log the event.
*/
char kstat_module_name[KSTAT_STRLEN];
int n = snprintf(kstat_module_name, sizeof (kstat_module_name),
"zfs/%s", spa_name(dmu_objset_spa(objset)));
if (n < 0) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
" snprintf() for kstat module name returned %d",
(unsigned long long)dmu_objset_id(objset), n);
return (SET_ERROR(EINVAL));
} else if (n >= KSTAT_STRLEN) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
"kstat module name length (%d) exceeds limit (%d)",
(unsigned long long)dmu_objset_id(objset),
n, KSTAT_STRLEN);
return (SET_ERROR(ENAMETOOLONG));
}
char kstat_name[KSTAT_STRLEN];
n = snprintf(kstat_name, sizeof (kstat_name), "objset-0x%llx",
(unsigned long long)dmu_objset_id(objset));
if (n < 0) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
" snprintf() for kstat name returned %d",
(unsigned long long)dmu_objset_id(objset), n);
return (SET_ERROR(EINVAL));
} else if (n >= KSTAT_STRLEN) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
"kstat name length (%d) exceeds limit (%d)",
(unsigned long long)dmu_objset_id(objset),
n, KSTAT_STRLEN);
return (SET_ERROR(ENAMETOOLONG));
}
kstat_t *kstat = kstat_create(kstat_module_name, 0, kstat_name,
"dataset", KSTAT_TYPE_NAMED,
sizeof (empty_dataset_kstats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (kstat == NULL)
return (SET_ERROR(ENOMEM));
dataset_kstat_values_t *dk_kstats =
kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
memcpy(dk_kstats, &empty_dataset_kstats,
sizeof (empty_dataset_kstats));
char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(objset->os_dsl_dataset, ds_name);
KSTAT_NAMED_STR_PTR(&dk_kstats->dkv_ds_name) = ds_name;
KSTAT_NAMED_STR_BUFLEN(&dk_kstats->dkv_ds_name) =
ZFS_MAX_DATASET_NAME_LEN;
kstat->ks_data = dk_kstats;
kstat->ks_update = dataset_kstats_update;
kstat->ks_private = dk;
kstat->ks_data_size += ZFS_MAX_DATASET_NAME_LEN;
wmsum_init(&dk->dk_sums.dss_writes, 0);
wmsum_init(&dk->dk_sums.dss_nwritten, 0);
wmsum_init(&dk->dk_sums.dss_reads, 0);
wmsum_init(&dk->dk_sums.dss_nread, 0);
wmsum_init(&dk->dk_sums.dss_nunlinks, 0);
wmsum_init(&dk->dk_sums.dss_nunlinked, 0);
zil_sums_init(&dk->dk_zil_sums);
dk->dk_kstats = kstat;
kstat_install(kstat);
return (0);
}
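/*
 * Illustrative result: for a pool named "tank" and an objset with id
 * 0x36, the code above creates kstat module "zfs/tank" with name
 * "objset-0x36", which on Linux surfaces as
 * /proc/spl/kstat/zfs/tank/objset-0x36.
 */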
void
dataset_kstats_destroy(dataset_kstats_t *dk)
{
if (dk->dk_kstats == NULL)
return;
dataset_kstat_values_t *dkv = dk->dk_kstats->ks_data;
kstat_delete(dk->dk_kstats);
dk->dk_kstats = NULL;
kmem_free(KSTAT_NAMED_STR_PTR(&dkv->dkv_ds_name),
KSTAT_NAMED_STR_BUFLEN(&dkv->dkv_ds_name));
kmem_free(dkv, sizeof (empty_dataset_kstats));
wmsum_fini(&dk->dk_sums.dss_writes);
wmsum_fini(&dk->dk_sums.dss_nwritten);
wmsum_fini(&dk->dk_sums.dss_reads);
wmsum_fini(&dk->dk_sums.dss_nread);
wmsum_fini(&dk->dk_sums.dss_nunlinks);
wmsum_fini(&dk->dk_sums.dss_nunlinked);
zil_sums_fini(&dk->dk_zil_sums);
}
+void
+dataset_kstats_rename(dataset_kstats_t *dk, const char *name)
+{
+ dataset_kstat_values_t *dkv = dk->dk_kstats->ks_data;
+ char *ds_name;
+
+ ds_name = KSTAT_NAMED_STR_PTR(&dkv->dkv_ds_name);
+ ASSERT3P(ds_name, !=, NULL);
+ (void) strlcpy(ds_name, name,
+ KSTAT_NAMED_STR_BUFLEN(&dkv->dkv_ds_name));
+}
+
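/*
 * A hypothetical usage sketch for the new function above: a rename
 * path holding the renamed dataset could refresh the "dataset_name"
 * kstat like this (ds and dk stand in for the caller's objects):
 *
 *	char dsname[ZFS_MAX_DATASET_NAME_LEN];
 *	dsl_dataset_name(ds, dsname);
 *	dataset_kstats_rename(dk, dsname);
 *
 * strlcpy() bounds the copy by the buffer length recorded at create
 * time, so an over-long name is truncated rather than overflowed.
 */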
void
dataset_kstats_update_write_kstats(dataset_kstats_t *dk,
int64_t nwritten)
{
ASSERT3S(nwritten, >=, 0);
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_writes, 1);
wmsum_add(&dk->dk_sums.dss_nwritten, nwritten);
}
void
dataset_kstats_update_read_kstats(dataset_kstats_t *dk,
int64_t nread)
{
ASSERT3S(nread, >=, 0);
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_reads, 1);
wmsum_add(&dk->dk_sums.dss_nread, nread);
}
void
dataset_kstats_update_nunlinks_kstat(dataset_kstats_t *dk, int64_t delta)
{
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_nunlinks, delta);
}
void
dataset_kstats_update_nunlinked_kstat(dataset_kstats_t *dk, int64_t delta)
{
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_nunlinked, delta);
}
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 5a7fe42b602a..280001bc34b6 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -1,5201 +1,5226 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>
static kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
kstat_named_t hash_elements_max;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
/*
* Number of entries in the hash table dbuf and mutex arrays.
*/
kstat_named_t hash_table_count;
kstat_named_t hash_mutex_count;
/*
* Statistics about the size of the metadata dbuf cache.
*/
kstat_named_t metadata_cache_count;
kstat_named_t metadata_cache_size_bytes;
kstat_named_t metadata_cache_size_bytes_max;
/*
* For diagnostic purposes, this is incremented whenever we can't add
* something to the metadata cache because it's full, and instead put
* the data in the regular dbuf cache.
*/
kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 },
{ "hash_table_count", KSTAT_DATA_UINT64 },
{ "hash_mutex_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t cache_count;
wmsum_t cache_total_evicts;
wmsum_t cache_levels[DN_MAX_LEVELS];
wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
wmsum_t hash_hits;
wmsum_t hash_misses;
wmsum_t hash_collisions;
wmsum_t hash_chains;
wmsum_t hash_insert_race;
wmsum_t metadata_cache_count;
wmsum_t metadata_cache_overflow;
} dbuf_sums;
#define DBUF_STAT_INCR(stat, val) \
wmsum_add(&dbuf_sums.stat, val);
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1);
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1);
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
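/*
 * DBUF_STAT_MAX is a lock-free maximum: it re-reads the current value
 * and retries atomic_cas_64() until either the stored value is already
 * >= v or this thread's compare-and-swap installs v. Single-threaded
 * equivalent (sketch):
 *
 *	if (v > dbuf_stats.stat.value.ui64)
 *		dbuf_stats.stat.value.ui64 = v;
 */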
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* There are two dbuf caches; each dbuf can only be in one of them at a time.
*
* 1. Cache of metadata dbufs, to help make read-heavy administrative commands
* from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
* that represent the metadata that describes filesystems/snapshots/
* bookmarks/properties/etc. We only evict from this cache when we export a
* pool, to short-circuit as much I/O as possible for all administrative
* commands that need the metadata. There is no eviction policy for this
* cache, because we try to only include types in it which would occupy a
* very small amount of space per object but create a large impact on the
* performance of these commands. Instead, after it reaches a maximum size
* (which should only happen on very small memory systems with a very large
* number of filesystem objects), we stop taking new dbufs into the
* metadata cache, instead putting them in the normal dbuf cache.
*
* 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*
* Dbufs are added to these caches once the last hold is released. If a dbuf is
* later accessed and still exists in the dbuf cache, then it will be removed
* from the cache and later re-added to the head of the cache.
*
* If a given dbuf meets the requirements for the metadata cache, it will go
* there, otherwise it will be considered for the generic LRU dbuf cache. The
* caches and the refcounts tracking their sizes are stored in an array indexed
* by those caches' matching enum values (from dbuf_cached_state_t).
*/
typedef struct dbuf_cache {
multilist_t cache;
zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;
/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;
static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);
/*
* The LRU dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
/*
* The percentage above and below the maximum cache size.
*/
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
zfs_refcount_create(&db->db_holds);
return (0);
}
static void
dbuf_dest(void *vdb, void *unused)
{
(void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
zfs_refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
#define DTRACE_SET_STATE(db, why) \
DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
const char *, why)
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
uint64_t *hash_out)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
mutex_exit(DBUF_HASH_MUTEX(h, idx));
if (hash_out != NULL)
*hash_out = hv;
return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
DBUF_STAT_MAX(hash_elements_max, he);
return (NULL);
}
/*
* This returns whether this dbuf should be stored in the metadata cache, which
* is based on whether it's from one of the dnode types that store data related
* to traversing dataset hierarchies.
*/
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
DB_DNODE_ENTER(db);
dmu_object_type_t type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
/* Check if this dbuf is one of the types we care about */
if (DMU_OT_IS_METADATA_CACHED(type)) {
/* If we hit this, then we set something up wrong in dmu_ot */
ASSERT(DMU_OT_IS_METADATA(type));
/*
* Sanity check for small-memory systems: don't allocate too
* much memory for this purpose.
*/
if (zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
dbuf_metadata_cache_target_bytes()) {
DBUF_STAT_BUMP(metadata_cache_overflow);
return (B_FALSE);
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t idx;
dmu_buf_impl_t *dbf, **dbp;
ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
db->db_blkid), ==, db->db_hash);
idx = db->db_hash & h->hash_table_mask;
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
*/
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
mutex_enter(DBUF_HASH_MUTEX(h, idx));
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = zfs_refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be metadata.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* We want to exclude buffers that are on a special allocation class from
* L2ARC.
*/
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(db->db_objset->os_secondary_cache ==
ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
blkptr_t *bp = db->db_blkptr;
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
(dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
(level > 0 ||
DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
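/*
 * Worked example: with 64 sublists, a dbuf whose hash truncates to
 * 1000003 is always placed in (and removed from) sublist
 * 1000003 % 64 = 3, so no per-dbuf sublist index needs to be stored.
 */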
/*
* The target size of the dbuf cache can grow with the ARC target,
* unless limited by the tunable dbuf_cache_max_bytes.
*/
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
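/*
 * Worked example: with an ARC target of 16 GiB and the default
 * dbuf_cache_shift of 5, the dbuf cache targets 16 GiB >> 5 = 512 MiB
 * (dbuf_cache_max_bytes defaults to UINT64_MAX, so no cap applies).
 */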
/*
* The target size of the dbuf metadata cache can grow with the ARC target,
* unless limited by the tunable dbuf_metadata_cache_max_bytes.
*/
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
return (MIN(dbuf_metadata_cache_max_bytes,
arc_target_bytes() >> dbuf_metadata_cache_shift));
}
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
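/*
 * Worked example: with a target of 500 MiB and the default 10%
 * settings, dbuf_cache_hiwater_bytes() = 500 + 50 = 550 MiB and
 * dbuf_cache_lowater_bytes() = 500 - 50 = 450 MiB, giving the
 * eviction thread a 100 MiB operating band.
 */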
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
multilist_sublist_t *mls = multilist_sublist_lock(
&dbuf_caches[DB_DBUF_CACHE].cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
db->db_caching_status = DB_NO_CACHE;
dbuf_destroy(db);
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(uint64_t size)
{
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (size > dbuf_cache_target_bytes()) {
if (size > dbuf_cache_hiwater_bytes())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
dbuf_hash_table_t *h = &dbuf_hash_table;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
ds->cache_count.value.ui64 =
wmsum_value(&dbuf_sums.cache_count);
ds->cache_size_bytes.value.ui64 =
zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->cache_total_evicts.value.ui64 =
wmsum_value(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
ds->cache_levels[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels[i]);
ds->cache_levels_bytes[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
}
ds->hash_hits.value.ui64 =
wmsum_value(&dbuf_sums.hash_hits);
ds->hash_misses.value.ui64 =
wmsum_value(&dbuf_sums.hash_misses);
ds->hash_collisions.value.ui64 =
wmsum_value(&dbuf_sums.hash_collisions);
ds->hash_chains.value.ui64 =
wmsum_value(&dbuf_sums.hash_chains);
ds->hash_insert_race.value.ui64 =
wmsum_value(&dbuf_sums.hash_insert_race);
ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
ds->metadata_cache_count.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_count);
ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
ds->metadata_cache_overflow.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
void
dbuf_init(void)
{
uint64_t hmsize, hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
/*
* The hash table is big enough to fill one eighth of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
hsize <<= 1;
h->hash_table = NULL;
while (h->hash_table == NULL) {
h->hash_table_mask = hsize - 1;
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
if (h->hash_table == NULL)
hsize >>= 1;
ASSERT3U(hsize, >=, 1ULL << 10);
}
/*
* The hash table buckets are protected by an array of mutexes where
* each mutex is responsible for protecting 128 buckets. A minimum
* array size of 8192 is targeted to avoid contention.
*/
if (dbuf_mutex_cache_shift == 0)
hmsize = MAX(hsize >> 7, 1ULL << 13);
else
hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
h->hash_mutexes = NULL;
while (h->hash_mutexes == NULL) {
h->hash_mutex_mask = hmsize - 1;
h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
KM_SLEEP);
if (h->hash_mutexes == NULL)
hmsize >>= 1;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
for (int i = 0; i < hmsize; i++)
mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
dbuf_stats_init(h);
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
multilist_create(&dbuf_caches[dcs].cache,
sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_caches[dcs].size);
}
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
wmsum_init(&dbuf_sums.cache_count, 0);
wmsum_init(&dbuf_sums.cache_total_evicts, 0);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_init(&dbuf_sums.cache_levels[i], 0);
wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
}
wmsum_init(&dbuf_sums.hash_hits, 0);
wmsum_init(&dbuf_sums.hash_misses, 0);
wmsum_init(&dbuf_sums.hash_collisions, 0);
wmsum_init(&dbuf_sums.hash_chains, 0);
wmsum_init(&dbuf_sums.hash_insert_race, 0);
wmsum_init(&dbuf_sums.metadata_cache_count, 0);
wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
for (int i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
}
}
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
dbuf_stats_destroy();
for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
mutex_destroy(&h->hash_mutexes[i]);
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
sizeof (kmutex_t));
kmem_cache_destroy(dbuf_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
zfs_refcount_destroy(&dbuf_caches[dcs].size);
multilist_destroy(&dbuf_caches[dcs].cache);
}
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
wmsum_fini(&dbuf_sums.cache_count);
wmsum_fini(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_fini(&dbuf_sums.cache_levels[i]);
wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
}
wmsum_fini(&dbuf_sums.hash_hits);
wmsum_fini(&dbuf_sums.hash_misses);
wmsum_fini(&dbuf_sums.hash_collisions);
wmsum_fini(&dbuf_sums.hash_chains);
wmsum_fini(&dbuf_sums.hash_insert_race);
wmsum_fini(&dbuf_sums.metadata_cache_count);
wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
uint32_t txg_prev;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
if ((dr = list_head(&db->db_dirty_records)) != NULL) {
ASSERT(dr->dr_dbuf == db);
txg_prev = dr->dr_txg;
for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
dr = list_next(&db->db_dirty_records, dr)) {
ASSERT(dr->dr_dbuf == db);
ASSERT(txg_prev > dr->dr_txg);
txg_prev = dr->dr_txg;
}
}
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
int epb __maybe_unused = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the parent's rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
/*
* If the blkptr isn't set but the buffer contains nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(bp->blk_phys_birth);
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "clear data");
}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n block is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
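/*
* Worked example (hypothetical geometry): with 128K data blocks
* (datablkshift = 17) and 128K indirect blocks (indblkshift = 17,
* so epbs = 17 - SPA_BLKPTRSHIFT = 10), the level 1 blkid for
* offset 2^30 (1 GiB) is 2^30 >> (17 + 1 * 10) = 2^30 >> 27 = 8.
*/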
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
/*
* This function is used to lock the parent of the provided dbuf. This should be
* used when modifying or reading db_blkptr.
*/
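/*
* Typical pairing, as used in dbuf_dirty() below: take the lock,
* access db_blkptr, then drop the lock using the returned type:
*
* db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
* ddt_prefetch(os->os_spa, db->db_blkptr);
* dmu_buf_unlock_parent(db, dblt, FTAG);
*/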
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
rw_enter(&db->db_parent->db_rwlock, rw);
ret = DLT_PARENT;
} else if (dmu_objset_ds(db->db_objset) != NULL) {
rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
tag);
ret = DLT_OBJSET;
}
/*
* We only return a DLT_NONE lock when it's the top-most indirect block
* of the meta-dnode of the MOS.
*/
return (ret);
}
/*
* We need to pass the lock type in because it's possible that the block will
* move from being the topmost indirect block in a dnode (and thus, have no
* parent) to not the top-most via an indirection increase. This would cause a
* panic if we didn't pass the lock type in.
*/
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
else if (type == DLT_OBJSET)
rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
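/*
* Done callback for the arc_read() issued by dbuf_read_impl(): move
* the dbuf from DB_READ to DB_CACHED on success (or back to
* DB_UNCACHED on i/o error), wake any waiters, and drop the hold
* taken for the read.
*/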
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
(void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "freed in flight");
} else {
/* success */
ASSERT(zio == NULL || zio->io_error == 0);
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "successful read");
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
}
/*
* Shortcut for performing reads on bonus dbufs. Returns
* an error if we fail to verify the dnode associated with
* a decrypted block. Otherwise success.
*/
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
int bonuslen, max_bonuslen, err;
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err)
return (err);
bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(DB_DNODE_HELD(db));
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
}
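/*
* An indirect block that is itself a hole has no children on disk, but
* the child blkptrs in its in-memory buffer must still carry a few
* fields (level, type, logical size and, importantly, birth time) so
* that hole birth times are not lost when the hole is partially
* filled; see the matching verification in dbuf_verify() above.
*/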
static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
blkptr_t *bps = db->db.db_data;
uint32_t indbs = 1ULL << dn->dn_indblkshift;
int n_bps = indbs >> SPA_BLKPTRSHIFT;
for (int i = 0; i < n_bps; i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
dn->dn_datablksz : BP_GET_LSIZE(dbbp));
BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
}
}
/*
* Handle reads on dbufs that are holes, if necessary. This function
* requires that the dbuf's mutex is held. Returns success (0) if action
* was taken, ENOENT if no action was taken.
*/
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int is_hole = bp == NULL || BP_IS_HOLE(bp);
/*
* For level 0 blocks only, if the above check fails:
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (!is_hole && db->db_level == 0)
is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
memset(db->db.db_data, 0, db->db.db_size);
if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
bp->blk_birth != 0) {
dbuf_handle_indirect_hole(db, dn, bp);
}
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "hole read satisfied");
return (0);
}
return (ENOENT);
}
/*
* This function ensures that, when doing a decrypting read of a block,
* we make sure we have decrypted the dnode associated with it. We must do
* this so that we ensure we are fully authenticating the checksum-of-MACs
* tree from the root of the objset down to this block. Indirect blocks are
* always verified against their secure checksum-of-MACs assuming that the
* dnode containing them is correct. Now that we are doing a decrypting read,
* we can be sure that the key is loaded and verify that assumption. This is
* especially important considering that we always read encrypted dnode
* blocks as raw data (without verifying their MACs) to start, and
* decrypt / authenticate them when we need to read an encrypted bonus buffer.
*/
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
int err = 0;
objset_t *os = db->db_objset;
arc_buf_t *dnode_abuf;
dnode_t *dn;
zbookmark_phys_t zb;
ASSERT(MUTEX_HELD(&db->db_mtx));
if ((flags & DB_RF_NO_DECRYPT) != 0 ||
!os->os_encrypted || os->os_raw_receive)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
DB_DNODE_EXIT(db);
return (0);
}
SET_BOOKMARK(&zb, dmu_objset_id(os),
DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
/*
* An error code of EACCES tells us that the key is still not
* available. This is ok if we are only reading authenticated
* (and therefore non-encrypted) blocks.
*/
if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
(db->db_blkid == DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
err = 0;
DB_DNODE_EXIT(db);
return (err);
}
/*
* Drops db_mtx and the parent lock specified by dblt and tag before
* returning.
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
db_lock_type_t dblt, const void *tag)
{
dnode_t *dn;
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags;
blkptr_t bp, *bpp;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_buf == NULL);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
if (db->db_blkid == DMU_BONUS_BLKID) {
err = dbuf_read_bonus(db, dn, flags);
goto early_unlock;
}
if (db->db_state == DB_UNCACHED) {
if (db->db_blkptr == NULL) {
bpp = NULL;
} else {
bp = *db->db_blkptr;
bpp = &bp;
}
} else {
dbuf_dirty_record_t *dr;
ASSERT3S(db->db_state, ==, DB_NOFILL);
/*
* Block cloning: If we have a pending block clone,
* we don't want to read the underlying block, but the content
* of the block being cloned, so we have the most recent data.
*/
dr = list_head(&db->db_dirty_records);
if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
err = EIO;
goto early_unlock;
}
bp = dr->dt.dl.dr_overridden_by;
bpp = &bp;
}
err = dbuf_read_hole(db, dn, bpp);
if (err == 0)
goto early_unlock;
ASSERT(bpp != NULL);
/*
* Any attempt to read a redacted block should result in an error. This
* will never happen under normal conditions, but can be useful for
* debugging purposes.
*/
if (BP_IS_REDACTED(bpp)) {
ASSERT(dsl_dataset_feature_is_active(
db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
err = SET_ERROR(EIO);
goto early_unlock;
}
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
- zfs_panic_recover("unencrypted block in encrypted "
- "object set %llu", dmu_objset_id(db->db_objset));
err = SET_ERROR(EIO);
goto early_unlock;
}
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err != 0)
goto early_unlock;
DB_DNODE_EXIT(db);
db->db_state = DB_READ;
DTRACE_SET_STATE(db, "read issued");
mutex_exit(&db->db_mtx);
if (!DBUF_IS_CACHEABLE(db))
aflags |= ARC_FLAG_UNCACHED;
else if (dbuf_is_l2cacheable(db))
aflags |= ARC_FLAG_L2CACHE;
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
zio_flags |= ZIO_FLAG_RAW;
/*
* The zio layer will copy the provided blkptr later, but we have our
* own copy so that we can release the parent's rwlock. We have to
* do that so that if dbuf_read_done is called synchronously (on
* an l1 cache hit) we don't acquire the db_mtx while holding the
* parent's rwlock, which would be a lock ordering violation.
*/
dmu_buf_unlock_parent(db, dblt, tag);
(void) arc_read(zio, db->db_objset->os_spa, bpp,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb);
return (err);
early_unlock:
DB_DNODE_EXIT(db);
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, tag);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
* and it's referencing the dbuf data, either:
* reset the reference to point to a new copy,
* or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
uint8_t complevel = arc_get_complevel(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type, complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type,
complevel);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
int err = 0;
boolean_t prefetch;
dnode_t *dn;
/*
* We don't have to hold the mutex to check db_state because it
* can't be freed while we have a hold on the buffer.
*/
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
mutex_enter(&db->db_mtx);
if (flags & DB_RF_PARTIAL_FIRST)
db->db_partial_read = B_TRUE;
else if (!(flags & DB_RF_PARTIAL_MORE))
db->db_partial_read = B_FALSE;
if (db->db_state == DB_CACHED) {
/*
* Ensure that this block's dnode has been decrypted if
* the caller has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, flags);
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if (err == 0 && db->db_buf != NULL &&
(flags & DB_RF_NO_DECRYPT) == 0 &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
spa_t *spa = dn->dn_objset->os_spa;
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
if (err == 0 && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_FALSE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
} else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
boolean_t need_wait = B_FALSE;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
if (zio == NULL && (db->db_state == DB_NOFILL ||
(db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
spa_t *spa = dn->dn_objset->os_spa;
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
/*
* dbuf_read_impl has dropped db_mtx and our parent's rwlock
* for us
*/
if (!err && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
db->db_state != DB_CACHED,
flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/*
* If we created a zio_root we must execute it to avoid
* leaking it, even if it isn't attached to any work due
* to an error in dbuf_read_impl().
*/
if (need_wait) {
if (err == 0)
err = zio_wait(zio);
else
VERIFY0(zio_wait(zio));
}
} else {
/*
* Another reader came in while the dbuf was in flight
* between UNCACHED and CACHED. Either a writer will finish
* writing the buffer (sending the dbuf to CACHED) or the
* first reader's request will reach the read_done callback
* and send the dbuf to CACHED. Otherwise, a failure
* occurred and the dbuf went to UNCACHED.
*/
mutex_exit(&db->db_mtx);
if (prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_TRUE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/* Skip the wait per the caller's request. */
if ((flags & DB_RF_NEVERWAIT) == 0) {
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL) {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
db, zio_t *, zio);
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
}
}
return (err);
}
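/*
* Transition the dbuf to DB_FILL without reading the existing block:
* wait out any in-flight READ/FILL, then attach a fresh arc buf (or,
* for DB_NOFILL dbufs, just clear the data). Used by callers that
* intend to overwrite the entire block.
*/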
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
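/*
* Undo a dmu_sync()/block-clone override on a level-0 dirty record:
* free the already-written block (unless it was a nopwrite) and return
* the record to the DR_NOT_OVERRIDDEN state.
*/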
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
- boolean_t release;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
- release = !dr->dt.dl.dr_brtwrite;
+ if (dr->dt.dl.dr_brtwrite) {
+ ASSERT0(dr->dt.dl.dr_data);
+ dr->dt.dl.dr_data = db->db_buf;
+ }
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_brtwrite = B_FALSE;
dr->dt.dl.dr_has_raw_params = B_FALSE;
/*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
- if (release)
+ if (dr->dt.dl.dr_data)
arc_release(dr->dt.dl.dr_data, db);
}
/*
* Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
dbuf_dirty_record_t *dr;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
(u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (zfs_refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
dr = list_head(&db->db_dirty_records);
if (dr != NULL) {
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
* Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_search, sizeof (dmu_buf_impl_t));
}
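/*
* Resize a level-0 dbuf in the given transaction: allocate an arc buf
* of the new size, copy over the old contents (zeroing any growth),
* and update the current dirty record's data and accounting.
*/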
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *old_buf;
dbuf_dirty_record_t *dr;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
old_buf = db->db_buf;
memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(old_buf, db);
db->db.db_size = size;
dr = list_head(&db->db_dirty_records);
/* dirty record added by dmu_buf_will_dirty() */
VERIFY(dr != NULL);
if (db->db_level == 0)
dr->dt.dl.dr_data = buf;
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
ASSERT3U(dr->dr_accounted, ==, osize);
dr->dr_accounted = size;
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
objset_t *os __maybe_unused = db->db_objset;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
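/*
* Dirty a block of the dnode without instantiating a dbuf for it (a
* "lightweight" dirty record; per the assertion below, no dbuf may
* exist for the block). Returns the new dirty record, or NULL if the
* parent indirect block could not be held or read.
*/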
dbuf_dirty_record_t *
dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
ASSERT(dn->dn_maxblkid >= blkid);
dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
dr->dr_txg = tx->tx_txg;
dr->dt.dll.dr_blkid = blkid;
dr->dr_accounted = dn->dn_datablksz;
/*
* There should not be any dbuf for the block that we're dirtying.
* Otherwise the buffer contents could be inconsistent between the
* dbuf and the lightweight dirty record.
*/
ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
NULL));
mutex_enter(&dn->dn_mtx);
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
}
if (dn->dn_nlevels == 1) {
ASSERT3U(blkid, <, dn->dn_nblkptr);
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
dnode_setdirty(dn, tx);
} else {
mutex_exit(&dn->dn_mtx);
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
1, blkid >> epbs, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (parent_db == NULL) {
kmem_free(dr, sizeof (*dr));
return (NULL);
}
int err = dbuf_read(parent_db, NULL,
(DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err != 0) {
dbuf_rele(parent_db, FTAG);
kmem_free(dr, sizeof (*dr));
return (NULL);
}
dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
dbuf_rele(parent_db, FTAG);
mutex_enter(&parent_dr->dt.di.dr_mtx);
ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
list_insert_tail(&parent_dr->dt.di.dr_children, dr);
mutex_exit(&parent_dr->dt.di.dr_mtx);
dr->dr_parent = parent_dr;
}
dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
return (dr);
}
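/*
* Mark the dbuf dirty in the given transaction: create (or redirty)
* the per-txg dirty record and recursively dirty the parent chain up
* to the dnode. Returns the dirty record for this txg.
*/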
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t *dr, *dr_next, *dr_head;
int txgoff = tx->tx_txg & TXG_MASK;
boolean_t drop_struct_rwlock = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
dnode_set_dirtyctx(dn, tx, db);
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
dr_head = list_head(&db->db_dirty_records);
ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
if (dr_next && dr_next->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr_next);
mutex_exit(&db->db_mtx);
return (dr_next);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
dr->dr_accounted = db->db.db_size;
}
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
list_insert_before(&db->db_dirty_records, dr_next, dr);
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_rwlock = B_TRUE;
}
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
if (db->db_blkptr != NULL) {
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
ddt_prefetch(os->os_spa, db->db_blkptr);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
if (db->db_level == 0) {
ASSERT(!db->db_objset->os_raw_receive ||
dn->dn_maxblkid >= db->db_blkid);
dnode_new_blkid(dn, db->db_blkid, tx,
drop_struct_rwlock, B_FALSE);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level + 1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (list_head(&db->db_dirty_records) == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level + 1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
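/*
* Tear down the dirty record of a bonus buffer: release any private
* copy of the bonus data, unlink the record, and drop the dbuf's
* dirty count.
*/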
static void
dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
if (dr->dt.dl.dr_data != db->db.db_data) {
struct dnode *dn = dr->dr_dnode;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
kmem_free(dr->dt.dl.dr_data, max_bonuslen);
arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT3U(db->db_dirtycnt, >, 0);
db->db_dirtycnt -= 1;
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
uint64_t txg = tx->tx_txg;
boolean_t brtwrite;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL)
return (B_FALSE);
ASSERT(dr->dr_dbuf == db);
brtwrite = dr->dt.dl.dr_brtwrite;
if (brtwrite) {
/*
* We are freeing a block that we cloned in the same
* transaction group.
*/
brt_pending_remove(dmu_objset_spa(db->db_objset),
&dr->dt.dl.dr_overridden_by, tx);
}
dnode_t *dn = dr->dr_dnode;
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
list_remove(&db->db_dirty_records, dr);
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
if (db->db_state != DB_NOFILL && !brtwrite) {
dbuf_unoverride(dr);
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || brtwrite ||
arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
boolean_t undirty = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
*/
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
/*
* It's possible that it is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
if (dr != NULL) {
if (dr->dt.dl.dr_brtwrite) {
/*
* Block cloning: If we are dirtying a cloned
* block, we cannot simply redirty it, because
* this dr has no data associated with it.
* We will go through a full undirtying below,
* before dirtying it again.
*/
undirty = B_TRUE;
} else {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
/*
* Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
* want to make sure dbuf_read() will read the pending cloned block and
* not the underlying block that is being replaced. dbuf_undirty() will
* do dbuf_unoverride(), so we will end up with the cloned block's
* content, without the overridden BP.
*/
(void) dbuf_read(db, NULL, flags);
if (undirty) {
mutex_enter(&db->db_mtx);
VERIFY(!dbuf_undirty(db, tx));
mutex_exit(&db->db_mtx);
}
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
boolean_t
dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
mutex_enter(&db->db_mtx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
mutex_exit(&db->db_mtx);
return (dr != NULL);
}
void
dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
/*
* Block cloning: We are going to clone into this block, so undirty
* modifications done to this block so far in this txg. This includes
* writes and clones into this block.
*/
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
VERIFY(!dbuf_undirty(db, tx));
ASSERT3P(dbuf_find_dirty_eq(db, tx->tx_txg), ==, NULL);
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
}
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone");
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer");
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
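/*
* Prepare the dbuf to be completely overwritten by the caller. A
* sketch of the intended protocol with the new canfail argument (the
* concrete callers live in the DMU, outside this hunk; copy_in_data()
* stands in for a hypothetical fill step):
*
* dmu_buf_will_fill(dbuf, tx, B_TRUE);
* failed = copy_in_data(dbuf);
* failed = dmu_buf_fill_done(dbuf, tx, failed);
*
* With canfail set, a block cloned in this txg is kept dirty via
* dmu_buf_will_dirty() instead of being undirtied, so the cloned data
* remains available if the fill later fails.
*/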
void
-dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
+dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
mutex_enter(&db->db_mtx);
if (db->db_state == DB_NOFILL) {
/*
* Block cloning: We will be completely overwriting a block
* cloned in this transaction group, so let's undirty the
* pending clone and mark the block as uncached. This will be
- * as if the clone was never done.
+ * as if the clone was never done. But if the fill can fail
+ * we need a way to fall back to the cloned data.
*/
+ if (canfail && dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) {
+ mutex_exit(&db->db_mtx);
+ dmu_buf_will_dirty(db_fake, tx);
+ return;
+ }
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
mutex_exit(&db->db_mtx);
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db, and provides
* the crypt params (byteorder, salt, iv, mac) which should be stored in the
* blkptr_t when this dbuf is written. This is only used for blocks of
* dnodes, during raw receive.
*/
void
dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
/*
* dr_has_raw_params is only processed for blocks of dnodes
* (see dbuf_sync_dnode_leaf_crypt()).
*/
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
ASSERT(db->db_objset->os_raw_receive);
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
ASSERT3P(dr, !=, NULL);
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
static void
dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
{
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
-void
-dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
+boolean_t
+dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
{
(void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
- dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
- old_state = db->db_state;
- db->db_state = DB_CACHED;
- if (old_state == DB_FILL) {
+ if (db->db_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
+ db->db_state = DB_CACHED;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
+ failed = B_FALSE;
+ } else if (failed) {
+ VERIFY(!dbuf_undirty(db, tx));
+ db->db_buf = NULL;
+ dbuf_clear_data(db);
+ DTRACE_SET_STATE(db, "fill failed");
} else {
+ db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
+ } else {
+ db->db_state = DB_CACHED;
+ failed = B_FALSE;
}
mutex_exit(&db->db_mtx);
+ return (failed);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
dbuf_dirty_record_t *dr;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
dr = list_head(&db->db_dirty_records);
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
void
dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dmu_object_type_t type;
ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
dmu_buf_will_not_fill(dbuf, tx);
blkptr_t bp = { { { {0} } } };
BP_SET_TYPE(&bp, type);
BP_SET_LEVEL(&bp, 0);
BP_SET_BIRTH(&bp, tx->tx_txg, 0);
BP_SET_REDACTED(&bp);
BPE_SET_LSIZE(&bp, dbuf->db_size);
dbuf_override_impl(db, &bp, tx);
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
- ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
+ ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
+ db->db_state == DB_NOFILL);
if (db->db_state == DB_CACHED &&
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
+ } else if (db->db_state == DB_NOFILL) {
+ /*
+ * We will be completely replacing the cloned block. In case
+ * it was cloned in this transaction group, let's undirty the
+ * pending clone and mark the block as uncached. This will be
+ * as if the clone was never done.
+ */
+ VERIFY(!dbuf_undirty(db, tx));
+ db->db_state = DB_UNCACHED;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
- dmu_buf_fill_done(&db->db, tx);
+ dmu_buf_fill_done(&db->db, tx, B_FALSE);
}
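/*
* Free the dbuf and everything it references: its arc buf or bonus
* copy, its dbuf-cache and hash table linkage, and its hold on the
* dnode (or on the parent indirect dbuf). The dbuf must be
* unreferenced and its mutex held on entry.
*/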
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(zfs_refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "buffer cleared");
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
DTRACE_SET_STATE(db, "buffer eviction started");
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter_nested(&dn->dn_dbufs_mtx,
NESTED_SINGLE);
avl_remove(&dn->dn_dbufs, db);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(zfs_refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64 but (N-1) * epbs + 1 > 55
* (i.e. N-1 levels already suffice to address the entire object), then
* objects will all use at most N-1 levels and the assertion won't
* overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
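/*
 * A worked instance of the bound above (illustrative numbers only):
 * with 128K indirect blocks, epbs = 17 - SPA_BLKPTRSHIFT = 10, so even
 * a six-level object gives 6 * 10 + 1 = 61 <= 64 and the shift in the
 * comparison below cannot overflow a 64-bit block id.
 */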
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
err = dbuf_hold_impl(dn, level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
rw_enter(&(*parentp)->db_rwlock, RW_READER);
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
rw_exit(&(*parentp)->db_rwlock);
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
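/*
 * To make the recursion above concrete (hypothetical numbers): with
 * epbs = 10, finding the BP for level-0 blkid 12345 holds the level-1
 * dbuf at blkid 12345 >> 10 == 12 and returns a pointer to entry
 * 12345 & 1023 == 57 of that indirect block's blkptr_t array.
 */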
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dbuf_node));
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_dirtycnt = 0;
db->db_dnode_handle = dn->dn_handle;
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_hash = hash;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "bonus buffer created");
db->db_caching_status = DB_NO_CACHE;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we get the new dbuf
* in the hash table *and* added to the dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING; /* not worth logging this state change */
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
mutex_exit(&dn->dn_dbufs_mtx);
kmem_cache_free(dbuf_kmem_cache, db);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "regular buffer created");
db->db_caching_status = DB_NO_CACHE;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
zfs_refcount_count(&dn->dn_holds) > 0);
(void) zfs_refcount_add(&dn->dn_holds, db);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
/*
* This function returns a block pointer and information about the object,
* given a dnode and a block. This is a publicly accessible version of
* dbuf_findbp that only returns some information, rather than the
* dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
* should be locked as (at least) a reader.
*/
int
dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
{
dmu_buf_impl_t *dbp = NULL;
blkptr_t *bp2;
int err = 0;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
if (err == 0) {
ASSERT3P(bp2, !=, NULL);
*bp = *bp2;
if (dbp != NULL)
dbuf_rele(dbp, NULL);
if (datablkszsec != NULL)
*datablkszsec = dn->dn_phys->dn_datablkszsec;
if (indblkshift != NULL)
*indblkshift = dn->dn_phys->dn_indblkshift;
}
return (err);
}
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
void *dpa_arg; /* prefetch completion arg */
} dbuf_prefetch_arg_t;
static void
dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
{
if (dpa->dpa_cb != NULL) {
dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
dpa->dpa_zb.zb_blkid, io_done);
}
kmem_free(dpa, sizeof (*dpa));
}
static void
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
if (abuf != NULL)
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
}
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (dbuf_prefetch_fini(dpa, B_FALSE));
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_NO_BUF;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
dbuf_issue_final_prefetch_done, dpa,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
(void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
if (abuf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
ASSERT(zio == NULL || zio->io_error == 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
}
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS)));
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
arc_buf_destroy(abuf, private);
dbuf_prefetch_fini(dpa, B_TRUE);
return;
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
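/*
 * Sketch of the chain (hypothetical case): prefetching L0 blkid B when
 * only the L3 indirect is cached reads the L2 block, whose BP is taken
 * from the cached L3 buffer; dbuf_prefetch_indirect_done() then reads
 * L1, and its callback finally issues the L0 prefetch. At each step
 * the child blkid is B >> (epbs * (curlevel - target level)).
 */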
int
dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
goto no_issue;
if (level == 0 && dnode_block_freed(dn, blkid))
goto no_issue;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
goto no_issue;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
goto no_issue;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid, NULL);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
goto no_issue;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
ASSERT(!BP_IS_REDACTED(&bp) ||
dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
goto no_issue;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
dpa->dpa_cb = cb;
dpa->dpa_arg = arg;
if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
else if (dnode_level_is_l2cacheable(&bp, dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dnode_level_is_l2cacheable(&bp, dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
return (1);
no_issue:
if (cb != NULL)
cb(arg, level, blkid, B_FALSE);
return (0);
}
int
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
}
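/*
 * Illustrative call (not from this file): with dn_struct_rwlock held,
 * start an asynchronous prefetch of a level-0 block and ignore whether
 * an I/O was actually issued:
 *
 *	(void) dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0);
 */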
/*
* Helper function for dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
* arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
uint8_t complevel = arc_get_complevel(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type, complevel));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type, complevel));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
rw_enter(&db->db_rwlock, RW_WRITER);
memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
const void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
uint64_t hv;
/* If the pool has been created, verify the tx_sync_lock is not held */
spa_t *spa = dn->dn_objset->os_spa;
dsl_pool_t *dp = spa->spa_dsl_pool;
if (dp != NULL) {
ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
}
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT3U(dn->dn_nlevels, >, level);
*dbp = NULL;
/* dbuf_find() returns with db_mtx held */
db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
if (db == NULL) {
blkptr_t *bp = NULL;
int err;
if (fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(parent, ==, NULL);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
return (err);
}
}
if (err && err != ENOENT)
return (err);
db = dbuf_create(dn, level, blkid, parent, bp, hv);
}
if (fail_uncached && db->db_state != DB_CACHED) {
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (db->db_buf != NULL) {
arc_buf_access(db->db_buf);
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
}
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
db->db_state == DB_CACHED && db->db_data_pending) {
dbuf_dirty_record_t *dr = db->db_data_pending;
if (dr->dt.dl.dr_data == db->db_buf) {
ASSERT3P(db->db_buf, !=, NULL);
dbuf_hold_copy(dn, db);
}
}
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
(void) zfs_refcount_add(&db->db_holds, tag);
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (parent)
dbuf_rele(parent, NULL);
ASSERT3P(DB_DNODE(db), ==, dn);
ASSERT3U(db->db_blkid, ==, blkid);
ASSERT3U(db->db_level, ==, level);
*dbp = db;
return (0);
}
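/*
 * Usage sketch (illustrative only): hold a level-0 dbuf, read it, and
 * release it; dn_struct_rwlock must be held, per the comment above.
 *
 *	dmu_buf_impl_t *db;
 *	if (dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db) == 0) {
 *		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
 *		dbuf_rele(db, FTAG);
 *	}
 */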
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
dbuf_new_size(db, blksz, tx);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
const void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid, NULL);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) zfs_refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = zfs_refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
dbuf_cached_state_t dcs =
dbuf_include_in_metadata_cache(db) ?
DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
uint64_t db_size = db->db.db_size;
size = zfs_refcount_add_many(
&dbuf_caches[dcs].size, db_size, db);
uint8_t db_level = db->db_level;
mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
DBUF_STAT_MAX(metadata_cache_size_bytes_max,
size);
} else {
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_MAX(cache_size_bytes_max, size);
DBUF_STAT_BUMP(cache_levels[db_level]);
DBUF_STAT_INCR(cache_levels_bytes[db_level],
db_size);
}
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (zfs_refcount_count(&db->db_holds));
}
uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
uint64_t holds;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
mutex_exit(&db->db_mtx);
return (holds);
}
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
void
dmu_buf_user_evict_wait(void)
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_ENTER(dbi);
return (DB_DNODE(dbi));
}
void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_EXIT(dbi);
}
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)) */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there was
* no available blkptrs from the dnode, or it was
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
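/*
 * For instance (hypothetical numbers): with epbs = 10, a level-1 dbuf
 * at db_blkid 12 under a three-level dnode takes its BP from entry
 * 12 & 1023 == 12 of its level-2 parent's array, the parent having
 * been held above via dbuf_hold_level(dn, 2, 12 >> 10 == 0, db).
 */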
static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
void *data = dr->dt.dl.dr_data;
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_blkid == DMU_BONUS_BLKID);
ASSERT(data != NULL);
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
dbuf_undirty_bonus(dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}
/*
* When syncing out a block of dnodes, adjust the block to deal with
* encryption. Normally, we make sure the block is decrypted before writing
* it. If we have crypt params, then we are writing a raw (encrypted) block,
* from a raw receive. In this case, set the ARC buf's crypt params so
* that the BP will be filled with the correct byteorder, salt, iv, and mac.
*/
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_has_raw_params) {
(void) arc_release(dr->dt.dl.dr_data, db);
arc_convert_to_raw(dr->dt.dl.dr_data,
dmu_objset_id(db->db_objset),
dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, db->db_buf, tx);
zio_t *zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
/*
* Verify that the size of the data in our bonus buffer does not exceed
* its recorded size.
*
* The purpose of this verification is to catch any cases in development
* where the size of a phys structure (e.g. space_map_phys_t) grows and,
* due to incorrect feature management, older pools expect to read more
* data even though they didn't actually write it to begin with.
*
* For example, this would catch an error in the feature logic where we
* open an older pool and we expect to write the space map histogram of
* a space map with size SPACE_MAP_SIZE_V0.
*/
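/*
 * Concretely (assuming the current on-disk layout): a single-slot
 * dnode allows DN_SLOTS_TO_BONUSLEN(1) == 320 bytes of bonus space,
 * so if dn_bonuslen is 64, bytes 64..319 must still be zero.
 */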
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
dnode_t *dn = dr->dr_dnode;
/*
* Encrypted bonus buffers can have data past their bonuslen.
* Skip the verification of these blocks.
*/
if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
return;
uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(bonuslen, <=, maxbonuslen);
arc_buf_t *datap = dr->dt.dl.dr_data;
char *datap_end = ((char *)datap) + bonuslen;
char *datap_max = ((char *)datap) + maxbonuslen;
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
ASSERT(*datap_end == 0);
#endif
}
static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
ASSERT3P(dr->dr_dbuf, ==, NULL);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
} else {
dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
VERIFY3U(parent_db->db_level, ==, 1);
VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
blkptr_t *bp = parent_db->db.db_data;
return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
}
}
static void
dbuf_lightweight_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
blkptr_t *bp = zio->io_bp;
if (zio->io_error != 0)
return;
dnode_t *dn = dr->dr_dnode;
blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
spa_t *spa = dmu_objset_spa(dn->dn_objset);
int64_t delta = bp_get_dsize_sync(spa, bp) -
bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta);
uint64_t blkid = dr->dt.dll.dr_blkid;
mutex_enter(&dn->dn_mtx);
if (blkid > dn->dn_phys->dn_maxblkid) {
ASSERT0(dn->dn_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = blkid;
}
mutex_exit(&dn->dn_mtx);
if (!BP_IS_EMBEDDED(bp)) {
uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
BP_SET_FILL(bp, fill);
}
dmu_buf_impl_t *parent_db;
EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
if (dr->dr_parent == NULL) {
parent_db = dn->dn_dbuf;
} else {
parent_db = dr->dr_parent->dr_dbuf;
}
rw_enter(&parent_db->db_rwlock, RW_WRITER);
*bp_orig = *bp;
rw_exit(&parent_db->db_rwlock);
}
static void
dbuf_lightweight_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
VERIFY0(zio->io_error);
objset_t *os = dr->dr_dnode->dn_objset;
dmu_tx_t *tx = os->os_synctx;
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, zio->io_bp, tx);
}
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
abd_free(dr->dt.dll.dr_abd);
kmem_free(dr, sizeof (*dr));
}
noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dnode_t *dn = dr->dr_dnode;
zio_t *pio;
if (dn->dn_phys->dn_nlevels == 1) {
pio = dn->dn_zio;
} else {
pio = dr->dr_parent->dr_zio;
}
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(dn->dn_objset),
.zb_object = dn->dn_object,
.zb_level = 0,
.zb_blkid = dr->dt.dll.dr_blkid,
};
/*
* See comment in dbuf_write(). This is so that zio->io_bp_orig
* will have the old BP in dbuf_lightweight_done().
*/
dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
&dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
zio_nowait(dr->dr_zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we
* might have been freed after being dirtied.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT(db->db.db_data == NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
} else if (db->db_state == DB_READ) {
/*
* This buffer has a clone we need to write, and an in-flight
* read on the BP we're about to clone. It's safe to issue the
* write here because the read has already been issued and the
* contents won't change.
*/
ASSERT(dr->dt.dl.dr_brtwrite &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the new allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dr->dr_dbuf == db);
dbuf_sync_bonus(dr, tx);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write,
* wait for the synchronous IO to complete.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_prepare_encrypted_dnode_leaf(dr);
if (db->db_state != DB_NOFILL &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
zfs_refcount_count(&db->db_holds) > 1 &&
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
*datap == db->db_buf) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DMU_OT_DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
uint8_t complevel = arc_get_complevel(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type,
complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type, complevel);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
} else {
zio_nowait(dr->dr_zio);
}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
list_remove(list, dr);
if (dr->dr_dbuf == NULL) {
dbuf_sync_lightweight(dr, tx);
} else {
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
}
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (bp->blk_birth != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID) {
ASSERT0(db->db_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = db->db_blkid;
}
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
for (int j = 0; j < dnp->dn_nblkptr;
j++) {
(void) zfs_blkptr_verify(spa,
&dnp->dn_blkptr[j],
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
if (dnp->dn_flags &
DNODE_FLAG_SPILL_BLKPTR) {
(void) zfs_blkptr_verify(spa,
DN_SPILL_BLKPTR(dnp),
BLK_CONFIG_SKIP,
BLK_VERIFY_HALT);
}
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
(void) zfs_blkptr_verify(spa, ibp,
BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
*db->db_blkptr = *bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* this indirect block may be compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
}
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
(void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
dbuf_dirty_record_t *dr = db->db_data_pending;
dnode_t *dn = dr->dr_dnode;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
if (db->db_state != DB_NOFILL) {
if (dr->dt.dl.dr_data != NULL &&
dr->dt.dl.dr_data != db->db_buf) {
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
}
} else {
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
zio->io_txg);
kmem_free(dr, sizeof (dbuf_dirty_record_t));
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
typedef struct dbuf_remap_impl_callback_arg {
objset_t *drica_os;
uint64_t drica_blk_birth;
dmu_tx_t *drica_tx;
} dbuf_remap_impl_callback_arg_t;
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg)
{
dbuf_remap_impl_callback_arg_t *drica = arg;
objset_t *os = drica->drica_os;
spa_t *spa = dmu_objset_spa(os);
dmu_tx_t *tx = drica->drica_tx;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (os == spa_meta_objset(spa)) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
size, drica->drica_blk_birth, tx);
}
}
static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
blkptr_t bp_copy = *bp;
spa_t *spa = dmu_objset_spa(dn->dn_objset);
dbuf_remap_impl_callback_arg_t drica;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
drica.drica_os = dn->dn_objset;
drica.drica_blk_birth = bp->blk_birth;
drica.drica_tx = tx;
if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
&drica)) {
/*
* If the blkptr being remapped is tracked by a livelist,
* then we need to make sure the livelist reflects the update.
* First, cancel out the old blkptr by appending a 'FREE'
* entry. Next, add an 'ALLOC' to track the new version. This
* way we avoid trying to free an inaccurate blkptr at delete.
* Note that embedded blkptrs are not tracked in livelists.
*/
if (dn->dn_objset != spa_meta_objset(spa)) {
dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg) {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees,
bp);
bplist_append(&ds->ds_dir->dd_pending_allocs,
&bp_copy);
}
}
/*
* The db_rwlock prevents dbuf_read_impl() from
* dereferencing the BP while we are changing it. To
* avoid lock contention, only grab it when we are actually
* changing the BP.
*/
if (rw != NULL)
rw_enter(rw, RW_WRITER);
*bp = bp_copy;
if (rw != NULL)
rw_exit(rw);
}
}
/*
* Remap any existing BP's to concrete vdevs, if possible.
*/
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(db->db_objset);
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
return;
if (db->db_level > 0) {
blkptr_t *bp = db->db.db_data;
for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
}
} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
dnode_phys_t *dnp = db->db.db_data;
ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
DMU_OT_DNODE);
for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
i += dnp[i].dn_extra_slots + 1) {
for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
&dn->dn_dbuf->db_rwlock);
dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
tx);
}
}
}
}
/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *pio; /* parent I/O */
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
os = dn->dn_objset;
if (db->db_state != DB_NOFILL) {
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather
* than in dbuf_dirty() since they are only modified
* in the syncing context and we don't want the
* overhead of making multiple copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr)) {
arc_buf_thaw(data);
} else {
dbuf_release_bp(db);
}
dbuf_remap(dn, db, tx);
}
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
pio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
pio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
ASSERT(pio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync() or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
contents, db->db.db_size, db->db.db_size, &zp,
dbuf_write_override_ready, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
dr->dt.dl.dr_brtwrite);
mutex_exit(&db->db_mtx);
} else if (db->db_state == DB_NOFILL) {
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to set up the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
children_ready_cb, dbuf_write_done, db,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_clone);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of the dbuf cache.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
"Maximum size in bytes of dbuf metadata cache.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
"Set size of dbuf cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
"Set size of dbuf metadata cache to log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
"Set size of dbuf cache mutex array as log2 shift.");
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index 3f626031de52..3215ab1c2a14 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -1,2574 +1,2586 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif
/*
* Enable/disable nopwrite feature.
*/
static int zfs_nopwrite_enabled = 1;
/*
* Tunable to control the percentage of dirtied L1 blocks from frees allowed into
* one TXG. After this threshold is crossed, additional dirty blocks from frees
* will wait until the next TXG.
* A value of zero will disable this throttle.
*/
static uint_t zfs_per_txg_dirty_frees_percent = 30;
/*
* Enable/disable forcing txg sync when dirty checking for holes with lseek().
* By default this is enabled to ensure accurate hole reporting, but it can
* result in a significant performance penalty for lseek(SEEK_HOLE)-heavy
* workloads. Disabling this option will result in holes never being
* reported in dirty files, which is always safe.
*/
static int zfs_dmu_offset_next_sync = 1;
/*
* Limit how much data a single call may prefetch. This helps to bound
* the amount of memory that can be used by prefetching.
* Larger objects should be prefetched a bit at a time.
*/
#ifdef _ILP32
uint_t dmu_prefetch_max = 8 * 1024 * 1024;
#else
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
#endif
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};
dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
{ byteswap_uint8_array, "uint8" },
{ byteswap_uint16_array, "uint16" },
{ byteswap_uint32_array, "uint32" },
{ byteswap_uint64_array, "uint64" },
{ zap_byteswap, "zap" },
{ dnode_buf_byteswap, "dnode" },
{ dmu_objset_byteswap, "objset" },
{ zfs_znode_byteswap, "znode" },
{ zfs_oldacl_byteswap, "oldacl" },
{ zfs_acl_byteswap, "acl" }
};
int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp)
{
uint64_t blkid;
dmu_buf_impl_t *db;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (0);
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
uint64_t blkid;
dmu_buf_impl_t *db;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (err);
}
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_bonus_max(void)
{
return (DN_OLD_MAX_BONUSLEN);
}
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
dmu_object_type_t type;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
type = dn->dn_bonustype;
DB_DNODE_EXIT(db);
return (type);
}
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
if (error != 0)
    return (error);
dbuf_rm_spill(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_rm_spill(dn, tx);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (error);
}
/*
* Look up and hold the bonus buffer for the provided dnode. If the dnode
* does not yet have a bonus dbuf, a new one will be allocated.
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
uint32_t flags)
{
dmu_buf_impl_t *db;
int error;
uint32_t db_flags = DB_RF_MUST_SUCCEED;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
if (dn->dn_bonus == NULL)
dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
/* as long as the bonus buf is held, the dnode will be held */
if (zfs_refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
atomic_inc_32(&dn->dn_dbufs_count);
}
/*
* Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
* hold and incrementing the dbuf count to ensure that dnode_move() sees
* a dnode hold for every dbuf.
*/
rw_exit(&dn->dn_struct_rwlock);
error = dbuf_read(db, NULL, db_flags);
if (error) {
dnode_evict_bonus(dn);
dbuf_rele(db, tag);
*dbp = NULL;
return (error);
}
*dbp = &db->db;
return (0);
}
int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
if (error)
return (error);
error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
dnode_rele(dn, FTAG);
return (error);
}
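/*
* Illustrative sketch, not part of this change: the typical consumer
* pattern for the bonus buffer API above. The helper is hypothetical;
* real callers also distinguish the ENOENT/EIO returns documented for
* dmu_bonus_hold_by_dnode().
*/
static inline int
example_read_bonus(objset_t *os, uint64_t object)
{
    dmu_buf_t *db;
    int err = dmu_bonus_hold(os, object, FTAG, &db);
    if (err != 0)
        return (err);
    /* db->db_data now points to db->db_size bytes of bonus data. */
    dmu_buf_rele(db, FTAG);
    return (0);
}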
/*
* returns ENOENT, EIO, or 0.
*
* This interface will allocate a blank spill dbuf when a spill blk
* doesn't already exist on the dnode.
*
* if you only want to find an already existing spill db, then
* dmu_spill_hold_existing() should be used.
*/
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = NULL;
int err;
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
err = dbuf_read(db, NULL, flags);
if (err == 0)
*dbp = &db->db;
else {
dbuf_rele(db, tag);
*dbp = NULL;
}
return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
}
rw_exit(&dn->dn_struct_rwlock);
}
DB_DNODE_EXIT(db);
return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
uint32_t db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
uint32_t flags)
{
dmu_buf_t **dbp;
zstream_t *zs = NULL;
uint64_t blkid, nblks, i;
uint32_t dbuf_flags;
int err;
zio_t *zio = NULL;
boolean_t missed = B_FALSE;
ASSERT(!read || length <= DMU_MAX_ACCESS);
/*
* Note: We directly notify the prefetch code of this read, so that
* we can tell it about the multi-block read. dbuf_read() only knows
* about the one block it is accessing.
*/
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
DB_RF_NOPREFETCH;
if ((flags & DMU_READ_NO_DECRYPT) != 0)
dbuf_flags |= DB_RF_NO_DECRYPT;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
} else {
if (offset + length > dn->dn_datablksz) {
zfs_panic_recover("zfs: accessing past end of object "
"%llx/%llx (size=%u access=%llu+%llu)",
(longlong_t)dn->dn_objset->
os_dsl_dataset->ds_object,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(EIO));
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
if (read)
zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, 0, offset);
if ((flags & DMU_READ_NO_PREFETCH) == 0) {
/*
* Prepare the zfetch before initiating the demand reads, so
* that if multiple threads block on same indirect block, we
* base predictions on the original less racy request order.
*/
zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
B_TRUE);
}
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
if (db == NULL) {
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
if (read)
zio_nowait(zio);
return (SET_ERROR(EIO));
}
/*
* Initiate async demand data read.
* We check the db_state after calling dbuf_read() because
* (1) dbuf_read() may change the state to CACHED due to a
* hit in the ARC, and (2) on a cache miss, a child will
* have been added to "zio" but not yet completed, so the
* state will not yet be CACHED.
*/
if (read) {
if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
offset + length < db->db.db_offset +
db->db.db_size) {
if (offset <= db->db.db_offset)
dbuf_flags |= DB_RF_PARTIAL_FIRST;
else
dbuf_flags |= DB_RF_PARTIAL_MORE;
}
(void) dbuf_read(db, zio, dbuf_flags);
if (db->db_state != DB_CACHED)
missed = B_TRUE;
}
dbp[i] = &db->db;
}
if (!read)
zfs_racct_write(length, nblks);
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
if (read) {
/* wait for async read i/o */
err = zio_wait(zio);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
/* wait for other io to complete */
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
}
}
*numbufsp = nblks;
*dbpp = dbp;
return (0);
}
int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, boolean_t read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
DB_DNODE_EXIT(db);
return (err);
}
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
int i;
dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
if (numbufs == 0)
return;
for (i = 0; i < numbufs; i++) {
if (dbp[i])
dbuf_rele(dbp[i], tag);
}
kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
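/*
* Illustrative sketch, not part of this change: the canonical
* hold/copy/release idiom for the array interfaces above;
* dmu_read_impl() below uses the same pattern. The helper is
* hypothetical and its error handling is minimal.
*/
static inline int
example_copy_range(objset_t *os, uint64_t object, uint64_t off,
    uint64_t len, char *buf)
{
    dmu_buf_t **dbp;
    int numbufs;
    int err = dmu_buf_hold_array(os, object, off, len, TRUE, FTAG,
        &numbufs, &dbp);
    if (err != 0)
        return (err);
    for (int i = 0; i < numbufs; i++) {
        dmu_buf_t *db = dbp[i];
        int64_t bufoff = off - db->db_offset;
        uint64_t tocpy = MIN(db->db_size - bufoff, len);
        memcpy(buf, (char *)db->db_data + bufoff, tocpy);
        buf += tocpy;
        off += tocpy;
        len -= tocpy;
    }
    dmu_buf_rele_array(dbp, numbufs, FTAG);
    return (0);
}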
/*
* Issue prefetch i/os for the given blocks. If level is greater than 0, the
* indirect blocks prefetched will be those that point to the blocks containing
* the data starting at offset, and continuing to offset + len.
*
* Note that if the indirect blocks above the blocks being prefetched are not
* in cache, they will be asynchronously read in.
*/
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, zio_priority_t pri)
{
dnode_t *dn;
uint64_t blkid;
int nblks, err;
if (len == 0) { /* they're interested in the bonus buffer */
dn = DMU_META_DNODE(os);
if (object == 0 || object >= DN_MAX_OBJECT)
return;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, level,
object * sizeof (dnode_phys_t));
dbuf_prefetch(dn, level, blkid, pri, 0);
rw_exit(&dn->dn_struct_rwlock);
return;
}
/*
* See comment before the definition of dmu_prefetch_max.
*/
len = MIN(len, dmu_prefetch_max);
/*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
*/
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return;
/*
* offset + len - 1 is the last byte we want to prefetch for, and offset
* is the first. Then dbuf_whichblock(dn, level, offset + len - 1) is the
* last block we want to prefetch, and dbuf_whichblock(dn, level,
* offset) is the first. The number of blocks we need to prefetch is
* then last - first + 1.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (level > 0 || dn->dn_datablkshift != 0) {
nblks = dbuf_whichblock(dn, level, offset + len - 1) -
dbuf_whichblock(dn, level, offset) + 1;
} else {
nblks = (offset < dn->dn_datablksz);
}
if (nblks != 0) {
blkid = dbuf_whichblock(dn, level, offset);
for (int i = 0; i < nblks; i++)
dbuf_prefetch(dn, level, blkid + i, pri, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
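/*
* Worked example for the nblks computation above (hypothetical values):
* with a 128K (2^17) data block size and level = 0, a request for
* offset = 100K and len = 300K spans bytes 102400..409599, i.e. blocks
* 0..3, so nblks = 3 - 0 + 1 = 4 prefetch i/os are issued.
*/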
/*
* Get the next "chunk" of file data to free. We traverse the file from
* the end so that the file gets shorter over time (if we crash in the
* middle, this will leave us in a better state). We find allocated file
* data by simply searching the allocated level 1 indirects.
*
* On input, *start should be the first offset that does not need to be
* freed (e.g. "offset + length"). On return, *start will be the first
* offset that should be freed and l1blks is set to the number of level 1
* indirect blocks found within the chunk.
*/
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
uint64_t blks;
uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
/* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
ASSERT3U(minimum, <=, *start);
/*
* Check if we can free the entire range assuming that all of the
* L1 blocks in this range have data. If we can, we use this
* worst case value as an estimate so we can avoid having to look
* at the object's actual data.
*/
uint64_t total_l1blks =
(roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
iblkrange;
if (total_l1blks <= maxblks) {
*l1blks = total_l1blks;
*start = minimum;
return (0);
}
ASSERT(ISP2(iblkrange));
for (blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
/*
* dnode_next_offset(BACKWARDS) will find an allocated L1
* indirect block at or before the input offset. We must
* decrement *start so that it is at the end of the region
* to search.
*/
(*start)--;
err = dnode_next_offset(dn,
DNODE_FIND_BACKWARDS, start, 2, 1, 0);
/* if there are no indirect blocks before start, we are done */
if (err == ESRCH) {
*start = minimum;
break;
} else if (err != 0) {
*l1blks = blks;
return (err);
}
/* set start to the beginning of this L1 indirect */
*start = P2ALIGN(*start, iblkrange);
}
if (*start < minimum)
*start = minimum;
*l1blks = blks;
return (0);
}
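/*
* Worked example for iblkrange above (hypothetical values): with 128K
* data blocks and 128K indirect blocks, each L1 indirect holds
* 128K / 128 = 1024 block pointers, so a single L1 covers
* 1024 * 128K = 128M of file data, and each chunk spans at most
* maxblks such L1 ranges.
*/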
/*
* If this objset is of type DMU_OST_ZFS, return true if the vfs's
* unmounted flag is set; otherwise return false.
* Used below in dmu_free_long_range_impl() to allow aborting when unmounting.
*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS)
return (zfs_get_vfs_flag_unmounted(os));
#else
(void) os;
#endif
return (B_FALSE);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
uint64_t length)
{
uint64_t object_size;
int err;
uint64_t dirty_frees_threshold;
dsl_pool_t *dp = dmu_objset_pool(os);
if (dn == NULL)
return (SET_ERROR(EINVAL));
object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
if (offset >= object_size)
return (0);
if (zfs_per_txg_dirty_frees_percent <= 100)
dirty_frees_threshold =
zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
else
dirty_frees_threshold = zfs_dirty_data_max / 20;
if (length == DMU_OBJECT_END || offset + length > object_size)
length = object_size - offset;
while (length != 0) {
uint64_t chunk_end, chunk_begin, chunk_len;
uint64_t l1blks;
dmu_tx_t *tx;
if (dmu_objset_zfs_unmounting(dn->dn_objset))
return (SET_ERROR(EINTR));
chunk_end = chunk_begin = offset + length;
/* move chunk_begin backwards to the beginning of this chunk */
err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
if (err)
return (err);
ASSERT3U(chunk_begin, >=, offset);
ASSERT3U(chunk_begin, <=, chunk_end);
chunk_len = chunk_end - chunk_begin;
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
/*
* Mark this transaction as typically resulting in a net
* reduction in space used.
*/
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
uint64_t txg = dmu_tx_get_txg(tx);
mutex_enter(&dp->dp_lock);
uint64_t long_free_dirty =
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
mutex_exit(&dp->dp_lock);
/*
* To avoid filling up a TXG with just frees, wait for
* the next TXG to open before freeing more chunks if
* we have reached the threshold of frees.
*/
if (dirty_frees_threshold != 0 &&
long_free_dirty >= dirty_frees_threshold) {
DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
dmu_tx_commit(tx);
txg_wait_open(dp, 0, B_TRUE);
continue;
}
/*
* In order to prevent unnecessary write throttling, for each
* TXG, we track the cumulative size of L1 blocks being dirtied
* in dnode_free_range() below. We compare this number to a
* tunable threshold, past which we prevent new L1 dirty freeing
* blocks from being added into the open TXG. See
* dmu_free_long_range_impl() for details. The threshold
* prevents write throttle activation due to dirty freeing L1
* blocks taking up a large percentage of zfs_dirty_data_max.
*/
mutex_enter(&dp->dp_lock);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
l1blks << dn->dn_indblkshift;
mutex_exit(&dp->dp_lock);
DTRACE_PROBE3(free__long__range,
uint64_t, long_free_dirty, uint64_t, chunk_len,
uint64_t, txg);
dnode_free_range(dn, chunk_begin, chunk_len, tx);
dmu_tx_commit(tx);
length -= chunk_len;
}
return (0);
}
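/*
* Worked example for the free throttle above (hypothetical values):
* with zfs_dirty_data_max = 4G and zfs_per_txg_dirty_frees_percent = 30,
* dirty_frees_threshold is 1.2G; once a txg has accumulated that much
* dirtied L1 indirect data from frees, further chunks wait for the
* next txg to open.
*/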
int
dmu_free_long_range(objset_t *os, uint64_t object,
uint64_t offset, uint64_t length)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_free_long_range_impl(os, dn, offset, length);
/*
* It is important to zero out the maxblkid when freeing the entire
* file, so that (a) subsequent calls to dmu_free_long_range_impl()
* will take the fast path, and (b) dnode_reallocate() can verify
* that the entire file has been freed.
*/
if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
dn->dn_maxblkid = 0;
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
dmu_tx_t *tx;
int err;
err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
if (err != 0)
return (err);
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
err = dmu_object_free(os, object, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
return (err);
}
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
ASSERT(offset < UINT64_MAX);
ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
dnode_free_range(dn, offset, size, tx);
dnode_rele(dn, FTAG);
return (0);
}
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dmu_buf_t **dbp;
int numbufs, err = 0;
/*
* Deal with odd block sizes, where there can't be data past the first
* block. If we ever do the tail block optimization, we will need to
* handle that here as well.
*/
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
memset((char *)buf + newsz, 0, size - newsz);
size = newsz;
}
while (size > 0) {
uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
int i;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
TRUE, FTAG, &numbufs, &dbp, flags);
if (err)
break;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
return (err);
}
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_read_impl(dn, offset, size, buf, flags);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags)
{
return (dmu_read_impl(dn, offset, size, buf, flags));
}
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
int i;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_FALSE);
else
dmu_buf_will_dirty(db, tx);
(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ dmu_buf_fill_done(db, tx, B_FALSE);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
}
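/*
* Note on the idiom above: a write that covers a whole block uses
* dmu_buf_will_fill()/dmu_buf_fill_done() and so never needs the old
* contents, while a partial write must use dmu_buf_will_dirty(), which
* may first read the block in to preserve the bytes it does not touch.
*/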
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
/*
* Note: Lustre is an external consumer of this interface.
*/
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs, i;
if (size == 0)
return;
VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
dmu_buf_t *db = dbp[i];
dmu_buf_will_not_fill(db, tx);
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx)
{
dmu_buf_t *db;
ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
VERIFY0(dmu_buf_hold_noread(os, object, offset,
FTAG, &db));
dmu_buf_write_embedded(db,
data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
uncompressed_size, compressed_size, byteorder, tx);
dmu_buf_rele(db, FTAG);
}
void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
int numbufs, i;
dmu_buf_t **dbp;
VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
&numbufs, &dbp));
for (i = 0; i < numbufs; i++)
dmu_buf_redact(dbp[i], tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
int numbufs, i, err;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
TRUE, FTAG, &numbufs, &dbp, 0);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
UIO_READ, uio);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Read 'size' bytes into the uio buffer, from object zdb->db_object,
* starting at offset zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_read_uio_dnode(dn, uio, size);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Read 'size' bytes into the uio buffer, from the specified object,
* starting at offset zfs_uio_offset(uio).
*/
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_read_uio_dnode(dn, uio, size);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
int err = 0;
int i;
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
- bufoff = zfs_uio_offset(uio) - db->db_offset;
+ offset_t off = zfs_uio_offset(uio);
+ bufoff = off - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_TRUE);
else
dmu_buf_will_dirty(db, tx);
- /*
- * XXX zfs_uiomove could block forever (eg.nfs-backed
- * pages). There needs to be a uiolockdown() function
- * to lock the pages in memory, so that zfs_uiomove won't
- * block.
- */
err = zfs_uio_fault_move((char *)db->db_data + bufoff,
tocpy, UIO_WRITE, uio);
- if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
+ /* The fill was reverted. Undo any uio progress. */
+ zfs_uio_advance(uio, off - zfs_uio_offset(uio));
+ }
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Write 'size' bytes from the uio buffer, to object zdb->db_object,
* starting at offset zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_write_uio_dnode(dn, uio, size, tx);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Write 'size' bytes from the uio buffer, to the specified object,
* starting at offset zfs_uio_offset(uio).
*/
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_write_uio_dnode(dn, uio, size, tx);
dnode_rele(dn, FTAG);
return (err);
}
#endif /* _KERNEL */
/*
* Allocate a loaned anonymous arc buffer.
*/
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}
/*
* Free a loaned arc buffer.
*/
void
dmu_return_arcbuf(arc_buf_t *buf)
{
arc_return_buf(buf, FTAG);
arc_buf_destroy(buf, FTAG);
}
/*
* A "lightweight" write is faster than a regular write (e.g.
* dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
* CPU cost of creating a dmu_buf_impl_t and an arc_buf_t/arc_buf_hdr_t.
* However, the data cannot be read or overwritten until the transaction's txg has been
* synced. This makes it appropriate for workloads that are known to be
* (temporarily) write-only, like "zfs receive".
*
* A single block is written, starting at the specified offset in bytes. If
* the call is successful, it returns 0 and the provided abd has been
* consumed (the caller should not free it).
*/
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr =
dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
if (dr == NULL)
return (SET_ERROR(EIO));
dr->dt.dll.dr_abd = abd;
dr->dt.dll.dr_props = *zp;
dr->dt.dll.dr_flags = flags;
return (0);
}
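/*
* Illustrative sketch, not part of this change: one way a caller might
* feed dmu_lightweight_write_by_dnode(). The helper is hypothetical;
* wp = 0 and borrowing "buf" via abd_get_from_buf() are assumptions
* for the example. On success the abd belongs to the dirty record and
* must not be freed by the caller.
*/
static inline int
example_lightweight_write(dnode_t *dn, uint64_t off, void *buf,
    uint64_t len, dmu_tx_t *tx)
{
    abd_t *abd = abd_get_from_buf(buf, len);
    zio_prop_t zp;
    int err;

    dmu_write_policy(dn->dn_objset, dn, 0, 0, &zp);
    err = dmu_lightweight_write_by_dnode(dn, off, abd, &zp, 0, tx);
    if (err != 0)
        abd_free(abd);    /* the abd is not consumed on failure */
    return (err);
}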
/*
* When possible, directly assign the passed loaned arc buffer to a dbuf.
* If this is not possible, copy the contents of the passed arc buf via
* dmu_write().
*/
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
objset_t *os = dn->dn_objset;
uint64_t object = dn->dn_object;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, FTAG);
+ rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (SET_ERROR(EIO));
- rw_exit(&dn->dn_struct_rwlock);
/*
* We can only assign if the offset is aligned and the arc buf is the
* same size as the dbuf.
*/
if (offset == db->db.db_offset && blksz == db->db.db_size) {
zfs_racct_write(blksz, 1);
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
}
return (0);
}
int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
int err;
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf);
err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf);
return (err);
}
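/*
* Illustrative sketch, not part of this change: the intended
* loaned-buffer flow for dmu_request_arcbuf() and
* dmu_assign_arcbuf_by_dbuf() above. The helper is hypothetical and
* error handling is elided; blksz should match the object's block
* size for the zero-copy assignment path to be taken.
*/
static inline void
example_assign_loaned(dmu_buf_t *handle, uint64_t off, int blksz,
    const void *src, dmu_tx_t *tx)
{
    arc_buf_t *buf = dmu_request_arcbuf(handle, blksz);

    memcpy(buf->b_data, src, blksz);
    /* On a size mismatch the data is copied via dmu_write() instead. */
    VERIFY0(dmu_assign_arcbuf_by_dbuf(handle, off, buf, tx));
}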
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
zgd_t *dsa_zgd;
dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
(void) buf;
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
blkptr_t *bp = zio->io_bp;
if (zio->io_error == 0) {
if (BP_IS_HOLE(bp)) {
/*
* A block of zeros may compress to a hole, but the
* block size still needs to be known for replay.
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
ASSERT(BP_GET_LEVEL(bp) == 0);
BP_SET_FILL(bp, 1);
}
}
}
static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
dmu_sync_ready(zio, NULL, zio->io_private);
}
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
(void) buf;
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
zgd_t *zgd = dsa->dsa_zgd;
/*
* Record the vdev(s) backing this blkptr so they can be flushed after
* the writes for the lwb have completed.
*/
if (zio->io_error == 0) {
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
}
mutex_enter(&db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
if (zio->io_error == 0) {
dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
if (dr->dt.dl.dr_nopwrite) {
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
ASSERT(BP_EQUAL(bp, bp_orig));
VERIFY(BP_EQUAL(bp, db->db_blkptr));
ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
VERIFY(zio_checksum_table[chksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
}
dr->dt.dl.dr_overridden_by = *zio->io_bp;
dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
/*
* Old style holes are filled with all zeros, whereas
* new-style holes maintain their lsize, type, level,
* and birth time (see zio_write_compress). While we
* need to reset the BP_SET_LSIZE() call that happened
* in dmu_sync_ready for old style holes, we do *not*
* want to wipe out the information contained in new
* style holes. Thus, only zero out the block pointer if
* it's an old style hole.
*/
if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
dr->dt.dl.dr_overridden_by.blk_birth == 0)
BP_ZERO(&dr->dt.dl.dr_overridden_by);
} else {
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
}
cv_broadcast(&db->db_changed);
mutex_exit(&db->db_mtx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
kmem_free(dsa, sizeof (*dsa));
}
static void
dmu_sync_late_arrival_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
dmu_sync_arg_t *dsa = zio->io_private;
zgd_t *zgd = dsa->dsa_zgd;
if (zio->io_error == 0) {
/*
* Record the vdev(s) backing this blkptr so they can be
* flushed after the writes for the lwb have completed.
*/
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
if (!BP_IS_HOLE(bp)) {
blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
ASSERT(zio->io_bp->blk_birth == zio->io_txg);
ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
}
}
dmu_tx_commit(dsa->dsa_tx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
abd_free(zio->io_abd);
kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
zio_prop_t *zp, zbookmark_phys_t *zb)
{
dmu_sync_arg_t *dsa;
dmu_tx_t *tx;
int error;
error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
DB_RF_CANFAIL | DB_RF_NOPREFETCH);
if (error != 0)
return (error);
tx = dmu_tx_create(os);
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
/*
* This transaction does not produce any dirty data or log blocks, so
* it should not be throttled. All other cases wait for TXG sync, by
* which time the log block we are writing will be obsolete, so we can
* skip waiting and just return error here instead.
*/
if (dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE) != 0) {
dmu_tx_abort(tx);
/* Make zl_get_data do txg_wait_synced() */
return (SET_ERROR(EIO));
}
/*
* In order to prevent the zgd's lwb from being free'd prior to
* dmu_sync_late_arrival_done() being called, we have to ensure
* the lwb's "max txg" takes this tx's txg into account.
*/
zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = tx;
/*
* Since we are currently syncing this txg, it's nontrivial to
* determine what BP to nopwrite against, so we disable nopwrite.
*
* When syncing, the db_blkptr is initially the BP of the previous
* txg. We can not nopwrite against it because it will be changed
* (this is similar to the non-late-arrival case where the dbuf is
* dirty in a future txg).
*
* Then dbuf_write_ready() sets db_blkptr to the location we will write.
* We can not nopwrite against it because although the BP will not
* (typically) be changed, the data has not yet been persisted to this
* location.
*
* Finally, when dbuf_write_done() is called, it is theoretically
* possible to always nopwrite, because the data that was written in
* this txg is the same data that we are trying to write. However we
* would need to check that this dbuf is not dirty in any future
* txg's (as we do in the normal dmu_sync() path). For simplicity, we
* don't nopwrite in this case.
*/
zp->zp_nopwrite = B_FALSE;
zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done,
dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
return (0);
}
/*
* Intent log support: sync the block associated with db to disk.
* N.B. and XXX: the caller is responsible for making sure that the
* data isn't changing while dmu_sync() is writing it.
*
* Return values:
*
* EEXIST: this txg has already been synced, so there's nothing to do.
* The caller should not log the write.
*
* ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
* The caller should not log the write.
*
* EALREADY: this block is already in the process of being synced.
* The caller should track its progress (somehow).
*
* EIO: could not do the I/O.
* The caller should do a txg_wait_synced().
*
* 0: the I/O has been initiated.
* The caller should log this blkptr in the done callback.
* It is possible that the I/O will fail, in which case
* the error will be reported to the done callback and
* propagated to pio from zio_done().
*/
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
objset_t *os = db->db_objset;
dsl_dataset_t *ds = os->os_dsl_dataset;
dbuf_dirty_record_t *dr, *dr_next;
dmu_sync_arg_t *dsa;
zbookmark_phys_t zb;
zio_prop_t zp;
dnode_t *dn;
ASSERT(pio != NULL);
ASSERT(txg != 0);
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
DB_DNODE_EXIT(db);
/*
* If we're frozen (running ziltest), we always need to generate a bp.
*/
if (txg > spa_freeze_txg(os->os_spa))
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
/*
* Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
* and us. If we determine that this txg is not yet syncing,
* but it begins to sync a moment later, that's OK because the
* sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
*/
mutex_enter(&db->db_mtx);
if (txg <= spa_last_synced_txg(os->os_spa)) {
/*
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
/*
* This txg is currently syncing, so we can't mess with
* the dirty record anymore; just write a new log block.
*/
mutex_exit(&db->db_mtx);
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
}
dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL) {
/*
* There's no dr for this dbuf, so it must have been freed.
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
dr_next = list_next(&db->db_dirty_records, dr);
ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
if (db->db_blkptr != NULL) {
/*
* We need to fill in zgd_bp with the current blkptr so that
* the nopwrite code can check if we're writing the same
* data that's already on disk. We can only nopwrite if we
* are sure that after making the copy, db_blkptr will not
* change until our i/o completes. We ensure this by
* holding the db_mtx, and only allowing nopwrite if the
* block is not already dirty (see below). This is verified
* by dmu_sync_done(), which VERIFYs that the db_blkptr has
* not changed.
*/
*zgd->zgd_bp = *db->db_blkptr;
}
/*
* Assume the on-disk data is X, the current syncing data (in
* txg - 1) is Y, and the current in-memory data is Z (currently
* in dmu_sync).
*
* We usually want to perform a nopwrite if X and Z are the
* same. However, if Y is different (i.e. the BP is going to
* change before this write takes effect), then a nopwrite will
* be incorrect - we would override with X, which could have
* been freed when Y was written.
*
* (Note that this is not a concern when we are nop-writing from
* syncing context, because X and Y must be identical, because
* all previous txgs have been synced.)
*
* Therefore, we disable nopwrite if the current BP could change
* before this TXG. There are two ways it could change: by
* being dirty (dr_next is non-NULL), or by being freed
* (dnode_block_freed()). This behavior is verified by
* zio_done(), which VERIFYs that the override BP is identical
* to the on-disk BP.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
zp.zp_nopwrite = B_FALSE;
DB_DNODE_EXIT(db);
ASSERT(dr->dr_txg == txg);
if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* We have already issued a sync write for this buffer,
* or this buffer has already been synced. It could not
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = NULL;
zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp,
dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db),
&zp, dmu_sync_ready, NULL, dmu_sync_done, dsa,
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
return (0);
}
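/*
* Illustrative sketch, not part of this change: how an intent-log
* get_data callback might map the dmu_sync() return contract above
* onto its own behavior, in the spirit of zfs_get_data(). The helper
* is hypothetical; "err" is the value returned by dmu_sync().
*/
static inline int
example_handle_dmu_sync(objset_t *os, int err)
{
    switch (err) {
    case 0:        /* i/o initiated; log the bp from the done callback */
    case EALREADY: /* already in flight; track its progress */
        return (0);
    case EEXIST:   /* txg already synced; do not log the write */
    case ENOENT:   /* block was freed; do not log the write */
        return (err);
    case EIO:      /* could not do the i/o */
    default:
        txg_wait_synced(dmu_objset_pool(os), 0);
        return (err);
    }
}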
int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_nlevels(dn, nlevels, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_blksz(dn, size, ibs, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (0);
}
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's checksum function. This
* check ensures that the receiving system can understand the
* checksum function transmitted.
*/
ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
dn->dn_checksum = checksum;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's compression function. This
* check ensures that the receiving system can understand the
* compression function transmitted.
*/
ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
dn->dn_compress = compress;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
*/
static const int zfs_redundant_metadata_most_ditto_level = 2;
void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
(wp & WP_SPILL));
enum zio_checksum checksum = os->os_checksum;
enum zio_compress compress = os->os_compress;
uint8_t complevel = os->os_complevel;
enum zio_checksum dedup_checksum = os->os_dedup_checksum;
boolean_t dedup = B_FALSE;
boolean_t nopwrite = B_FALSE;
boolean_t dedup_verify = os->os_dedup_verify;
boolean_t encrypt = B_FALSE;
int copies = os->os_copies;
/*
* We maintain different write policies for each of the following
* types of data:
* 1. metadata
* 2. preallocated blocks (i.e. level-0 blocks of a dump device)
* 3. all other level 0 blocks
*/
if (ismd) {
/*
* XXX -- we should design a compression algorithm
* that specializes in arrays of bps.
*/
compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
/*
* Metadata always gets checksummed. If the data
* checksum is multi-bit correctable, and it's not a
* ZBT-style checksum, then it's suitable for metadata
* as well. Otherwise, the metadata checksum defaults
* to fletcher4.
*/
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_METADATA) ||
(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED))
checksum = ZIO_CHECKSUM_FLETCHER_4;
switch (os->os_redundant_metadata) {
case ZFS_REDUNDANT_METADATA_ALL:
copies++;
break;
case ZFS_REDUNDANT_METADATA_MOST:
if (level >= zfs_redundant_metadata_most_ditto_level ||
DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))
copies++;
break;
case ZFS_REDUNDANT_METADATA_SOME:
if (DMU_OT_IS_CRITICAL(type))
copies++;
break;
case ZFS_REDUNDANT_METADATA_NONE:
break;
}
} else if (wp & WP_NOFILL) {
ASSERT(level == 0);
/*
* If we're writing preallocated blocks, we aren't actually
* writing them so don't set any policy properties. These
* blocks are currently only used by an external subsystem
* outside of zfs (i.e. dump) and not written by the zio
* pipeline.
*/
compress = ZIO_COMPRESS_OFF;
checksum = ZIO_CHECKSUM_OFF;
} else {
compress = zio_compress_select(os->os_spa, dn->dn_compress,
compress);
complevel = zio_complevel_select(os->os_spa, compress,
complevel, complevel);
checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
zio_checksum_select(dn->dn_checksum, checksum) :
dedup_checksum;
/*
* Determine dedup setting. If we are in dmu_sync(),
* we won't actually dedup now because that's all
* done in syncing context; but we do want to use the
* dedup checksum. If the checksum is not strong
* enough to ensure unique signatures, force
* dedup_verify.
*/
if (dedup_checksum != ZIO_CHECKSUM_OFF) {
dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP))
dedup_verify = B_TRUE;
}
/*
* Enable nopwrite if we have a secure enough checksum
* algorithm (see the comment in zio_nop_write) and
* compression is enabled. We don't enable nopwrite if
* dedup is enabled as the two features are mutually
* exclusive.
*/
nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) &&
compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
}
/*
* All objects in an encrypted objset are protected from modification
* via a MAC. Encrypted objects store their IV and salt in the last DVA
* in the bp, so we cannot use all copies. Encrypted objects are also
* not subject to nopwrite since writing the same data will still
* result in a new ciphertext. Only encrypted blocks can be dedup'd
* to avoid ambiguity in the dedup code since the DDT does not store
* object types.
*/
if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
encrypt = B_TRUE;
if (DMU_OT_IS_ENCRYPTED(type)) {
copies = MIN(copies, SPA_DVAS_PER_BP - 1);
nopwrite = B_FALSE;
} else {
dedup = B_FALSE;
}
if (level <= 0 &&
(type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
compress = ZIO_COMPRESS_EMPTY;
}
}
zp->zp_compress = compress;
zp->zp_complevel = complevel;
zp->zp_checksum = checksum;
zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
zp->zp_level = level;
zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
zp->zp_dedup = dedup;
zp->zp_dedup_verify = dedup && dedup_verify;
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
os->os_zpl_special_smallblock : 0;
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
/*
* Reports the location of data and holes in an object. In order to
* accurately report holes all dirty data must be synced to disk. This
* causes extremely poor performance when seeking for holes in a dirty file.
* As a compromise, only provide hole data when the dnode is clean. When
* a dnode is dirty, report the dnode as having no holes by returning
* EBUSY, which is always safe to do.
*/
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
dnode_t *dn;
int restarted = 0, err;
restart:
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dnode_is_dirty(dn)) {
/*
* If the zfs_dmu_offset_next_sync module option is enabled
* then hole reporting has been requested. Dirty dnodes
* must be synced to disk to accurately report holes.
*
* Provided a RL_READER rangelock spanning 0-UINT64_MAX is
* held by the caller only a single restart will be required.
* We tolerate callers which do not hold the rangelock by
* returning EBUSY and not reporting holes after one restart.
*/
if (zfs_dmu_offset_next_sync) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (restarted)
return (SET_ERROR(EBUSY));
txg_wait_synced(dmu_objset_pool(os), 0);
restarted = 1;
goto restart;
}
err = SET_ERROR(EBUSY);
} else {
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
(hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (err);
}
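/*
* Illustrative sketch, not part of this change: an lseek(SEEK_HOLE)
* style probe built on dmu_offset_next(). The helper and the mapping
* of EBUSY to ESRCH ("no hole found") are hypothetical; per the
* comment above, treating a dirty dnode as hole-free is always safe.
*/
static inline int
example_seek_hole(objset_t *os, uint64_t object, uint64_t *off)
{
    int err = dmu_offset_next(os, object, B_TRUE, off);

    if (err == EBUSY)
        err = SET_ERROR(ESRCH);    /* dirty: report no hole found */
    return (err);
}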
int
dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
blkptr_t *bps, size_t *nbpsp)
{
dmu_buf_t **dbp, *dbuf;
dmu_buf_impl_t *db;
blkptr_t *bp;
int error, numbufs;
error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
&numbufs, &dbp);
if (error != 0) {
if (error == ESRCH) {
error = SET_ERROR(ENXIO);
}
return (error);
}
ASSERT3U(numbufs, <=, *nbpsp);
for (int i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
mutex_enter(&db->db_mtx);
if (!list_is_empty(&db->db_dirty_records)) {
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
if (dr->dt.dl.dr_brtwrite) {
/*
* This is a very special case: we clone a
* block and, in the same transaction group,
* read its BP (most likely to clone the clone).
*/
bp = &dr->dt.dl.dr_overridden_by;
} else {
/*
* The block was modified in the same
* transaction group.
*/
mutex_exit(&db->db_mtx);
error = SET_ERROR(EAGAIN);
goto out;
}
} else {
bp = db->db_blkptr;
}
mutex_exit(&db->db_mtx);
if (bp == NULL) {
/*
* The block was created in this transaction group,
* so it has no BP yet.
*/
error = SET_ERROR(EAGAIN);
goto out;
}
/*
* Make sure we clone only data blocks.
*/
if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) {
error = SET_ERROR(EINVAL);
goto out;
}
+ /*
+ * If the block was allocated in a transaction group that is not
+ * yet synced, we could clone it, but we couldn't write this
+ * operation into the ZIL, or it might be impossible to replay,
+ * since the block may appear not yet allocated at that point.
+ */
+ if (BP_PHYSICAL_BIRTH(bp) > spa_freeze_txg(os->os_spa)) {
+ error = SET_ERROR(EINVAL);
+ goto out;
+ }
+ if (BP_PHYSICAL_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) {
+ error = SET_ERROR(EAGAIN);
+ goto out;
+ }
+
bps[i] = *bp;
}
*nbpsp = numbufs;
out:
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (error);
}
int
dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
dmu_tx_t *tx, const blkptr_t *bps, size_t nbps)
{
spa_t *spa;
dmu_buf_t **dbp, *dbuf;
dmu_buf_impl_t *db;
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
const blkptr_t *bp;
int error = 0, i, numbufs;
spa = os->os_spa;
VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
&numbufs, &dbp));
ASSERT3U(nbps, ==, numbufs);
/*
* Before we start cloning, make sure that the dbuf sizes match the new
* BP sizes. If they don't, that's a no-go, as we are not able to shrink
* dbufs.
*/
for (i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
bp = &bps[i];
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_blkid != DMU_SPILL_BLKID);
if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
error = SET_ERROR(EXDEV);
goto out;
}
}
for (i = 0; i < numbufs; i++) {
dbuf = dbp[i];
db = (dmu_buf_impl_t *)dbuf;
bp = &bps[i];
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_blkid != DMU_SPILL_BLKID);
ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp));
dmu_buf_will_clone(dbuf, tx);
mutex_enter(&db->db_mtx);
dr = list_head(&db->db_dirty_records);
VERIFY(dr != NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_brtwrite = B_TRUE;
dl->dr_override_state = DR_OVERRIDDEN;
if (BP_IS_HOLE(bp)) {
dl->dr_overridden_by.blk_birth = 0;
dl->dr_overridden_by.blk_phys_birth = 0;
} else {
dl->dr_overridden_by.blk_birth = dr->dr_txg;
if (!BP_IS_EMBEDDED(bp)) {
dl->dr_overridden_by.blk_phys_birth =
BP_PHYSICAL_BIRTH(bp);
}
}
mutex_exit(&db->db_mtx);
/*
* When the data is embedded into the BP, there is no need to create
* a BRT entry, as there is no data block. Just copy the BP, as
* it contains the data.
*/
if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_add(spa, bp, tx);
}
}
out:
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (error);
}
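/*
 * A sketch, for illustration, of how the two functions above pair up in
 * a block-cloning caller (locking, txg assignment, and error handling
 * elided; the surrounding names are assumptions, not from this file):
 *
 *	size_t nbps = count;
 *	error = dmu_read_l0_bps(srcos, srcobj, off, len, bps, &nbps);
 *	if (error == 0)
 *		error = dmu_brt_clone(dstos, dstobj, off, len, tx,
 *		    bps, nbps);
 *
 * EAGAIN from dmu_read_l0_bps() means the source blocks are not yet
 * stable on disk, so the caller should wait for a txg sync and retry.
 */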
void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
dnode_phys_t *dnp = dn->dn_phys;
doi->doi_data_block_size = dn->dn_datablksz;
doi->doi_metadata_block_size = dn->dn_indblkshift ?
1ULL << dn->dn_indblkshift : 0;
doi->doi_type = dn->dn_type;
doi->doi_bonus_type = dn->dn_bonustype;
doi->doi_bonus_size = dn->dn_bonuslen;
doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
doi->doi_indirection = dn->dn_nlevels;
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_nblkptr = dn->dn_nblkptr;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}
void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
mutex_enter(&dn->dn_mtx);
__dmu_object_info_from_dnode(dn, doi);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
}
/*
* Get information on a DMU object.
* If doi is NULL, just indicates whether the object exists.
*/
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
if (doi != NULL)
dmu_object_info_from_dnode(dn, doi);
dnode_rele(dn, FTAG);
return (0);
}
/*
* As above, but faster; can be used when you have a held dbuf in hand.
*/
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
DB_DNODE_ENTER(db);
dmu_object_info_from_dnode(DB_DNODE(db), doi);
DB_DNODE_EXIT(db);
}
/*
* Faster still when you only care about the size.
*/
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
u_longlong_t *nblk512)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*blksize = dn->dn_datablksz;
/* add in number of slots used for the dnode itself */
*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
DB_DNODE_EXIT(db);
}
void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*dnsize = dn->dn_num_slots << DNODE_SHIFT;
DB_DNODE_EXIT(db);
}
void
byteswap_uint64_array(void *vbuf, size_t size)
{
uint64_t *buf = vbuf;
size_t count = size >> 3;
int i;
ASSERT((size & 7) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
}
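/* For example, BSWAP_64(0x0102030405060708ULL) == 0x0807060504030201ULL. */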
void
byteswap_uint32_array(void *vbuf, size_t size)
{
uint32_t *buf = vbuf;
size_t count = size >> 2;
int i;
ASSERT((size & 3) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
}
void
byteswap_uint16_array(void *vbuf, size_t size)
{
uint16_t *buf = vbuf;
size_t count = size >> 1;
int i;
ASSERT((size & 1) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
}
void
byteswap_uint8_array(void *vbuf, size_t size)
{
(void) vbuf, (void) size;
}
void
dmu_init(void)
{
abd_init();
zfs_dbgmsg_init();
sa_cache_init();
dmu_objset_init();
dnode_init();
zfetch_init();
dmu_tx_init();
l2arc_init();
arc_init();
dbuf_init();
}
void
dmu_fini(void)
{
arc_fini(); /* arc depends on l2arc, so arc must go first */
l2arc_fini();
dmu_tx_fini();
zfetch_fini();
dbuf_fini();
dnode_fini();
dmu_objset_fini();
sa_cache_fini();
zfs_dbgmsg_fini();
abd_fini();
}
EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
"Enable NOP writes");
ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
"Percentage of dirtied blocks from frees in one TXG");
ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
"Enable forcing txg sync to find holes");
/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
"Limit one prefetch call to this size");
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index 05ca91717c2f..54aa60259ea1 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -1,3801 +1,3801 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2022 Axcient.
*/
#include <sys/arc.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>
static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static uint_t zfs_recv_queue_ff = 20;
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;
static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
typedef enum {
ORNS_NO,
ORNS_YES,
ORNS_MAYBE
} or_need_sync_t;
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
void *buf);
struct receive_record_arg {
dmu_replay_record_t header;
void *payload; /* Pointer to a buffer containing the payload */
/*
* If the record is a WRITE or SPILL, pointer to the abd containing the
* payload.
*/
abd_t *abd;
int payload_size;
uint64_t bytes_read; /* bytes read from stream when record created */
boolean_t eos_marker; /* Marks the end of the stream */
bqueue_node_t node;
};
struct receive_writer_arg {
objset_t *os;
boolean_t byteswap;
bqueue_t q;
/*
* These three members are used to signal to the main thread when
* we're done.
*/
kmutex_t mutex;
kcondvar_t cv;
boolean_t done;
int err;
const char *tofs;
boolean_t heal;
boolean_t resumable;
boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
boolean_t full; /* this is a full send stream */
uint64_t last_object;
uint64_t last_offset;
uint64_t max_object; /* highest object ID referenced in stream */
uint64_t bytes_read; /* bytes read when current record created */
list_t write_batch;
/* Encryption parameters for the last received DRR_OBJECT_RANGE */
boolean_t or_crypt_params_present;
uint64_t or_firstobj;
uint64_t or_numslots;
uint8_t or_salt[ZIO_DATA_SALT_LEN];
uint8_t or_iv[ZIO_DATA_IV_LEN];
uint8_t or_mac[ZIO_DATA_MAC_LEN];
boolean_t or_byteorder;
zio_t *heal_pio;
/* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
or_need_sync_t or_need_sync;
};
typedef struct dmu_recv_begin_arg {
const char *drba_origin;
dmu_recv_cookie_t *drba_cookie;
cred_t *drba_cred;
proc_t *drba_proc;
dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
switch (drr->drr_type) {
case DRR_BEGIN:
DO64(drr_begin.drr_magic);
DO64(drr_begin.drr_versioninfo);
DO64(drr_begin.drr_creation_time);
DO32(drr_begin.drr_type);
DO32(drr_begin.drr_flags);
DO64(drr_begin.drr_toguid);
DO64(drr_begin.drr_fromguid);
break;
case DRR_OBJECT:
DO64(drr_object.drr_object);
DO32(drr_object.drr_type);
DO32(drr_object.drr_bonustype);
DO32(drr_object.drr_blksz);
DO32(drr_object.drr_bonuslen);
DO32(drr_object.drr_raw_bonuslen);
DO64(drr_object.drr_toguid);
DO64(drr_object.drr_maxblkid);
break;
case DRR_FREEOBJECTS:
DO64(drr_freeobjects.drr_firstobj);
DO64(drr_freeobjects.drr_numobjs);
DO64(drr_freeobjects.drr_toguid);
break;
case DRR_WRITE:
DO64(drr_write.drr_object);
DO32(drr_write.drr_type);
DO64(drr_write.drr_offset);
DO64(drr_write.drr_logical_size);
DO64(drr_write.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
DO64(drr_write.drr_key.ddk_prop);
DO64(drr_write.drr_compressed_size);
break;
case DRR_WRITE_EMBEDDED:
DO64(drr_write_embedded.drr_object);
DO64(drr_write_embedded.drr_offset);
DO64(drr_write_embedded.drr_length);
DO64(drr_write_embedded.drr_toguid);
DO32(drr_write_embedded.drr_lsize);
DO32(drr_write_embedded.drr_psize);
break;
case DRR_FREE:
DO64(drr_free.drr_object);
DO64(drr_free.drr_offset);
DO64(drr_free.drr_length);
DO64(drr_free.drr_toguid);
break;
case DRR_SPILL:
DO64(drr_spill.drr_object);
DO64(drr_spill.drr_length);
DO64(drr_spill.drr_toguid);
DO64(drr_spill.drr_compressed_size);
DO32(drr_spill.drr_type);
break;
case DRR_OBJECT_RANGE:
DO64(drr_object_range.drr_firstobj);
DO64(drr_object_range.drr_numslots);
DO64(drr_object_range.drr_toguid);
break;
case DRR_REDACT:
DO64(drr_redact.drr_object);
DO64(drr_redact.drr_offset);
DO64(drr_redact.drr_length);
DO64(drr_redact.drr_toguid);
break;
case DRR_END:
DO64(drr_end.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
break;
default:
break;
}
if (drr->drr_type != DRR_BEGIN) {
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
}
#undef DO64
#undef DO32
}
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check that the new stream we're trying to receive is redacted with respect to
* a subset of the snapshots that the origin was redacted with respect to. For
* the reasons behind this, see the man page on redacted zfs sends and receives.
*/
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
/*
* Short circuit the comparison; if we are redacted with respect to
* more snapshots than the origin, we can't be redacted with respect
* to a subset.
*/
if (num_redact_snaps > origin_num_snaps) {
return (B_FALSE);
}
for (int i = 0; i < num_redact_snaps; i++) {
if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
redact_snaps[i])) {
return (B_FALSE);
}
}
return (B_TRUE);
}
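/*
 * A concrete example of the subset rule above: if the origin is
 * redacted with respect to snapshots {A, B, C}, a stream redacted with
 * respect to {A, B} is compatible, while one redacted with respect to
 * {A, D} (or {A, B, C, D}) is not.
 */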
static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
uint64_t *origin_snaps;
uint64_t origin_num_snaps;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
int err = 0;
boolean_t ret = B_TRUE;
uint64_t *redact_snaps;
uint_t numredactsnaps;
/*
* If this is a full send stream, we're safe no matter what.
*/
if (drrb->drr_fromguid == 0)
return (ret);
VERIFY(dsl_dataset_get_uint64_array_feature(origin,
SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
0) {
/*
* If the send stream was sent from the redaction bookmark or
* the redacted version of the dataset, then we're safe. Verify
* that this is from a compatible redaction bookmark or
* redacted dataset.
*/
if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
/*
* If the stream is redacted, it must be redacted with respect
* to a subset of what the origin is redacted with respect to.
* See case number 2 in the zfs man page section on redacted zfs
* send.
*/
err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);
if (err != 0 || !compatible_redact_snaps(origin_snaps,
origin_num_snaps, redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
drrb->drr_toguid)) {
/*
* If the stream isn't redacted but the origin is, this must be
* one of the snapshots the origin is redacted with respect to.
* See case number 1 in the zfs man page section on redacted zfs
* send.
*/
err = EINVAL;
}
if (err != 0)
ret = B_FALSE;
return (ret);
}
/*
* If we previously received a stream with --large-block, we don't support
* receiving an incremental on top of it without --large-block. This avoids
* forcing a read-modify-write or trying to re-aggregate a string of WRITE
* records.
*/
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
!(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
return (0);
}
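/*
 * For example: if a dataset was created by receiving a `zfs send -L`
 * stream, an incremental later sent without -L is rejected here with
 * ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH instead of forcing a
 * read-modify-write of the existing large blocks.
 */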
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
uint64_t fromguid, uint64_t featureflags)
{
uint64_t obj;
uint64_t children;
int error;
dsl_dataset_t *snap;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
/* Temporary clone name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
8, 1, &obj);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EBUSY) : error);
/* Resume state must not be set. */
if (dsl_dataset_has_resume_receive_state(ds))
return (SET_ERROR(EBUSY));
/* New snapshot name must not exist if we're not healing it. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj,
drba->drba_cookie->drc_tosnap, 8, 1, &obj);
if (drba->drba_cookie->drc_heal) {
if (error != 0)
return (error);
} else if (error != ENOENT) {
return (error == 0 ? SET_ERROR(EEXIST) : error);
}
/* Must not have children if receiving a ZVOL. */
error = zap_count(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
children > 0)
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
/*
* Check the snapshot limit before receiving. We'll recheck at the
* end, but might as well abort before receiving if we're already over
* the limit.
*
* Note that we do not check the file system limit with
* dsl_dir_fscount_check because the temporary %clones don't count
* against that limit.
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
NULL, drba->drba_cred, drba->drba_proc);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_heal) {
/* Encryption is incompatible with embedded data. */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Healing is not supported when in 'force' mode. */
if (drba->drba_cookie->drc_force)
return (SET_ERROR(EINVAL));
/* Must have keys loaded if doing encrypted non-raw recv. */
if (encrypted && !raw) {
if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
NULL, NULL) != 0)
return (SET_ERROR(EACCES));
}
error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
if (error != 0)
return (error);
/*
* When not doing best-effort corrective recv, healing can only
* be done if the send stream is for the same snapshot as the
* one we are trying to heal.
*/
if (zfs_recv_best_effort_corrective == 0 &&
drba->drba_cookie->drc_drrb->drr_toguid !=
dsl_dataset_phys(snap)->ds_guid) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(snap, FTAG);
} else if (fromguid != 0) {
/* Sanity check the incremental recv */
uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
/* Can't perform a raw receive on top of a non-raw receive */
if (!encrypted && raw)
return (SET_ERROR(EINVAL));
/* Encryption is incompatible with embedded data */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Find snapshot in this dir that matches fromguid. */
while (obj != 0) {
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (SET_ERROR(ENODEV));
if (snap->ds_dir != ds->ds_dir) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENODEV));
}
if (dsl_dataset_phys(snap)->ds_guid == fromguid)
break;
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (SET_ERROR(ENODEV));
if (drba->drba_cookie->drc_force) {
drba->drba_cookie->drc_fromsnapobj = obj;
} else {
/*
* If we are not forcing, there must be no
* changes since fromsnap. Raw sends have an
* additional constraint that requires that
* no "noop" snapshots exist between fromsnap
* and tosnap for the IVset checking code to
* work properly.
*/
if (dsl_dataset_modified_since_snap(ds, snap) ||
(raw &&
dsl_dataset_phys(ds)->ds_prev_snap_obj !=
snap->ds_object)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ETXTBSY));
}
drba->drba_cookie->drc_fromsnapobj =
ds->ds_prev->ds_object;
}
if (dsl_dataset_feature_is_active(snap,
SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
snap)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_check_large_blocks(snap, featureflags);
if (error != 0) {
dsl_dataset_rele(snap, FTAG);
return (error);
}
dsl_dataset_rele(snap, FTAG);
} else {
/* If full and not healing then must be forced. */
if (!drba->drba_cookie->drc_force)
return (SET_ERROR(EEXIST));
/*
* We don't support using zfs recv -F to blow away
* encrypted filesystems. This would require the
* dsl dir to point to the old encryption key and
* the new one at the same time during the receive.
*/
if ((!encrypted && raw) || encrypted)
return (SET_ERROR(EINVAL));
/*
* Perform the same encryption checks we would if
* we were creating a new dataset from scratch.
*/
if (!raw) {
boolean_t will_encrypt;
error = dmu_objset_create_crypt_check(
ds->ds_dir->dd_parent, drba->drba_dcp,
&will_encrypt);
if (error != 0)
return (error);
if (will_encrypt && embed)
return (SET_ERROR(EINVAL));
}
}
return (0);
}
/*
* Check that any feature flags used in the data stream we're receiving are
* supported by the pool we are receiving into.
*
* Note that some of the features we explicitly check here have additional
* (implicit) features they depend on, but those dependencies are enforced
* through the zfeature_register() calls declaring the features that we
* explicitly check.
*/
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
/*
* Check if there are any unsupported feature flags.
*/
if (!DMU_STREAM_SUPPORTED(featureflags)) {
return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
}
/* Verify pool version supports SA if SA_SPILL feature set */
if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(spa) < SPA_VERSION_SA)
return (SET_ERROR(ENOTSUP));
/*
* LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
* and large_dnodes in the stream can only be used if those pool
* features are enabled because we don't attempt to decompress /
* un-embed / un-mooch / split up the blocks / dnodes during the
* receive process.
*/
if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (SET_ERROR(ENOTSUP));
/*
* Receiving redacted streams requires that redacted datasets are
* enabled.
*/
if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
return (SET_ERROR(ENOTSUP));
return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
uint64_t fromguid = drrb->drr_fromguid;
int flags = drrb->drr_flags;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
const char *tofs = drba->drba_cookie->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (SET_ERROR(EINVAL));
error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
if (error != 0)
return (error);
/* Resumable receives require extensible datasets */
if (drba->drba_cookie->drc_resumable &&
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
return (SET_ERROR(ENOTSUP));
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
return (SET_ERROR(ENOTSUP));
/* embedded data is incompatible with encryption and raw recv */
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (SET_ERROR(EINVAL));
/* raw receives require spill block allocation flag */
if (!(flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
/*
* We support unencrypted datasets below encrypted ones now,
* so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
* with a dataset we may encrypt.
*/
if (drba->drba_dcp == NULL ||
drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid,
featureflags);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
char buf[ZFS_MAX_DATASET_NAME_LEN];
objset_t *os;
/* healing recv must be done "into" an existing snapshot */
if (drba->drba_cookie->drc_heal == B_TRUE)
return (SET_ERROR(ENOTSUP));
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
drba->drba_origin))
return (SET_ERROR(ENOENT));
/*
* If we're receiving a full send as a clone, and it doesn't
* contain all the necessary free records and freeobject
* records, reject it.
*/
if (fromguid == 0 && drba->drba_origin != NULL &&
!(flags & DRR_FLAG_FREERECORDS))
return (SET_ERROR(EINVAL));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, sizeof (buf));
(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
error = dsl_dataset_hold(dp, buf, FTAG, &ds);
if (error != 0)
return (error);
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
drba->drba_origin == NULL) {
boolean_t will_encrypt;
/*
* Check that we aren't breaking any encryption rules
* and that we have all the parameters we need to
* create an encrypted dataset if necessary. If we are
* making an encrypted dataset the stream can't have
* embedded data.
*/
error = dmu_objset_create_crypt_check(ds->ds_dir,
drba->drba_dcp, &will_encrypt);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (will_encrypt &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Check filesystem and snapshot limits before receiving. We'll
* recheck snapshot limits again at the end (we create the
* filesystems and increment those counts during begin_sync).
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_FILESYSTEM_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/* can't recv below anything but filesystems (e.g. no ZVOLs) */
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
if (drba->drba_origin != NULL) {
dsl_dataset_t *origin;
error = dsl_dataset_hold_flags(dp, drba->drba_origin,
dsflags, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (!origin->ds_is_snapshot) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
fromguid != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENODEV));
}
if (origin->ds_dir->dd_crypto_obj != 0 &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If the origin is redacted we need to verify that this
* send stream can safely be received on top of the
* origin.
*/
if (dsl_dataset_feature_is_active(origin,
SPA_FEATURE_REDACTED_DATASETS)) {
if (!redact_check(drba, origin)) {
dsl_dataset_rele_flags(origin, dsflags,
FTAG);
dsl_dataset_rele_flags(ds, dsflags,
FTAG);
return (SET_ERROR(EINVAL));
}
}
error = recv_check_large_blocks(ds, featureflags);
if (error != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(origin, dsflags, FTAG);
}
dsl_dataset_rele(ds, FTAG);
error = 0;
}
return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
const char *tofs = drc->drc_tofs;
uint64_t featureflags = drc->drc_featureflags;
dsl_dataset_t *ds, *newds;
objset_t *os;
uint64_t dsobj;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t crflags = 0;
dsl_crypto_params_t dummy_dcp = { 0 };
dsl_crypto_params_t *dcp = drba->drba_dcp;
if (drrb->drr_flags & DRR_FLAG_CI_DATA)
crflags |= DS_FLAG_CI_DATASET;
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
dsflags |= DS_HOLD_FLAG_DECRYPT;
/*
* Raw, non-incremental recvs always use a dummy dcp with
* the raw cmd set. Raw incremental recvs do not use a dcp
* since the encryption parameters are already set in stone.
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
ASSERT3P(dcp, ==, NULL);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
dcp->cp_cmd = DCP_CMD_RAW_RECV;
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* Create temporary clone unless we're doing corrective recv */
dsl_dataset_t *snap = NULL;
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
ASSERT3P(dcp, ==, NULL);
}
if (drc->drc_heal) {
/* When healing we want to use the provided snapshot */
VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
&dsobj));
} else {
dsobj = dsl_dataset_create_sync(ds->ds_dir,
recv_clone_name, snap, crflags, drba->drba_cred,
dcp, tx);
}
if (drba->drba_cookie->drc_fromsnapobj != 0)
dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else {
dsl_dir_t *dd;
const char *tail;
dsl_dataset_t *origin = NULL;
VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
ASSERT3P(dcp, ==, NULL);
}
/* Create new dataset. */
dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
origin, crflags, drba->drba_cred, dcp, tx);
if (origin != NULL)
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(dd, FTAG);
drc->drc_newfs = B_TRUE;
}
VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
&newds));
if (dsl_dataset_feature_is_active(newds,
SPA_FEATURE_REDACTED_DATASETS)) {
/*
* If the origin dataset is redacted, the child will be redacted
* when we create it. We clear the new dataset's
* redaction info; if it should be redacted, we'll fill
* in its information later.
*/
dsl_dataset_deactivate_feature(newds,
SPA_FEATURE_REDACTED_DATASETS, tx);
}
VERIFY0(dmu_objset_from_ds(newds, &os));
if (drc->drc_resumable) {
dsl_dataset_zapify(newds, tx);
if (drrb->drr_fromguid != 0) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
8, 1, &drrb->drr_fromguid, tx));
}
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
8, 1, &drrb->drr_toguid, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
uint64_t one = 1;
uint64_t zero = 0;
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
8, 1, &one, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
8, 1, &zero, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
8, 1, &zero, tx));
if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
8, 1, &one, tx));
}
uint64_t *redact_snaps;
uint_t numredactsnaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
&numredactsnaps) == 0) {
VERIFY0(zap_add(mos, dsobj,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
sizeof (*redact_snaps), numredactsnaps,
redact_snaps, tx));
}
}
/*
* Usually the os->os_encrypted value is tied to the presence of a
* DSL Crypto Key object in the dd. However, that will not be received
* until dmu_recv_stream(), so we set the value manually for now.
*/
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
os->os_encrypted = B_TRUE;
drba->drba_cookie->drc_raw = B_TRUE;
}
if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t *redact_snaps;
uint_t numredactsnaps;
VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
dsl_dataset_activate_redaction(newds, redact_snaps,
numredactsnaps, tx);
}
dmu_buf_will_dirty(newds->ds_dbuf, tx);
dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
/*
* If we actually created a non-clone, we need to create the objset
* in our new dataset. If this is a raw send we postpone this until
* dmu_recv_stream() so that we can allocate the metadnode with the
* properties from the DRR_BEGIN payload.
*/
rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
(featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
!drc->drc_heal) {
(void) dmu_objset_create_impl(dp->dp_spa,
newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
}
rrw_exit(&newds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = newds;
drba->drba_cookie->drc_os = os;
spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dmu_recv_cookie_t *drc = drba->drba_cookie;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drc->drc_drrb;
int error;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
dsl_dataset_t *ds;
const char *tofs = drc->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES)
return (SET_ERROR(EINVAL));
/*
* This is mostly a sanity check since we should have already done these
* checks during a previous attempt to receive the data.
*/
error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
dp->dp_spa);
if (error != 0)
return (error);
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s",
tofs, recv_clone_name);
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require spill block allocation flag */
if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
boolean_t recvexist = B_TRUE;
if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
/* %recv does not exist; continue in tofs */
recvexist = B_FALSE;
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error != 0)
return (error);
}
/*
* A resume of a full/newfs recv on an existing dataset should be done
* with the force flag.
*/
if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
}
/* check that ds is marked inconsistent */
if (!DS_IS_INCONSISTENT(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/* check that there is resuming data, and that the toguid matches */
if (!dsl_dataset_is_zapified(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
uint64_t val;
error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
if (error != 0 || drrb->drr_toguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Check if the receive is still running. If so, it will be owned.
* Note that nothing else can own the dataset (e.g. after the receive
* fails) because it will be marked inconsistent.
*/
if (dsl_dataset_has_owner(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EBUSY));
}
/* There should not be any snapshots of this fs yet. */
if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Note: resume point will be checked when we process the first WRITE
* record.
*/
/* check that the origin matches */
val = 0;
(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
if (drrb->drr_fromguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
drc->drc_fromsnapobj = ds->ds_prev->ds_object;
/*
* If we're resuming, and the send is redacted, then the original send
* must have been redacted, and must have been redacted with respect to
* the same snapshots.
*/
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t num_ds_redact_snaps;
uint64_t *ds_redact_snaps;
uint_t num_stream_redact_snaps;
uint64_t *stream_redact_snaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
&num_stream_redact_snaps) != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (!dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
&ds_redact_snaps)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
for (int i = 0; i < num_ds_redact_snaps; i++) {
if (!redact_snaps_contains(ds_redact_snaps,
num_ds_redact_snaps, stream_redact_snaps[i])) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
}
}
error = recv_check_large_blocks(ds, drc->drc_featureflags);
if (error != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
const char *tofs = drba->drba_cookie->drc_tofs;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
recv_clone_name);
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
drba->drba_cookie->drc_raw = B_TRUE;
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
!= 0) {
/* %recv does not exist; continue in tofs */
VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
&ds));
drba->drba_cookie->drc_newfs = B_TRUE;
}
ASSERT(DS_IS_INCONSISTENT(ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
drba->drba_cookie->drc_raw);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
drba->drba_cookie->drc_should_save = B_TRUE;
spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
* NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
dmu_recv_begin(const char *tofs, const char *tosnap,
dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
offset_t *voffp)
{
dmu_recv_begin_arg_t drba = { 0 };
int err = 0;
memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
drc->drc_tofs = tofs;
drc->drc_force = force;
drc->drc_heal = heal;
drc->drc_resumable = resumable;
drc->drc_cred = CRED();
drc->drc_proc = curproc;
drc->drc_clone = (origin != NULL);
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
drc->drc_byteswap = B_TRUE;
(void) fletcher_4_incremental_byteswap(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
byteswap_record(drr_begin);
} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
(void) fletcher_4_incremental_native(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
} else {
return (SET_ERROR(EINVAL));
}
drc->drc_fp = fp;
drc->drc_voff = *voffp;
drc->drc_featureflags =
DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
/*
* Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
* configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
* upper limit. Systems with less than 1GB of RAM will see a lower
* limit from `arc_all_memory() / 4`.
*/
if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
return (E2BIG);
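/*
 * For example, at exactly 1GB of RAM, arc_all_memory() / 4 equals
 * the 1U << 28 (256MB) hard cap, so only smaller systems see a
 * reduced limit.
 */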
if (payloadlen != 0) {
void *payload = vmem_alloc(payloadlen, KM_SLEEP);
/*
* For compatibility with recursive send streams, we don't do
* this here if the stream could be part of a package. Instead,
* we'll do it in dmu_recv_stream. If we pull the next header
* too early, and it's the END record, we break the `recv_skip`
* logic.
*/
err = receive_read_payload_and_next_header(drc, payloadlen,
payload);
if (err != 0) {
vmem_free(payload, payloadlen);
return (err);
}
err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
KM_SLEEP);
vmem_free(payload, payloadlen);
if (err != 0) {
kmem_free(drc->drc_next_rrd,
sizeof (*drc->drc_next_rrd));
return (err);
}
}
if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
drc->drc_spill = B_TRUE;
drba.drba_origin = origin;
drba.drba_cookie = drc;
drba.drba_cred = CRED();
drba.drba_proc = curproc;
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = dsl_sync_task(tofs,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
* with "zfs recv -o". For these receives we create a dcp and
* pass it to the sync task. Creating the dcp will implicitly
* remove the encryption params from the localprops nvlist,
* which avoids errors when trying to set these normally
* read-only properties. Any other kind of receive that
* attempts to set these properties will fail as a result.
*/
if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW) == 0 &&
origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
localprops, hidden_args, &drba.drba_dcp);
}
if (err == 0) {
err = dsl_sync_task(tofs,
dmu_recv_begin_check, dmu_recv_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
dsl_crypto_params_free(drba.drba_dcp, !!err);
}
}
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
nvlist_free(drc->drc_begin_nvl);
}
return (err);
}
/*
* Holds data needed for the corrective recv callback
*/
typedef struct cr_cb_data {
uint64_t size;
zbookmark_phys_t zb;
spa_t *spa;
} cr_cb_data_t;
static void
corrective_read_done(zio_t *zio)
{
cr_cb_data_t *data = zio->io_private;
/* Corruption corrected; update error log if needed */
if (zio->io_error == 0)
spa_remove_error(data->spa, &data->zb, &zio->io_bp->blk_birth);
kmem_free(data, sizeof (cr_cb_data_t));
abd_free(zio->io_abd);
}
/*
* zio_rewrite the data pointed to by bp with the data from the rrd's abd.
*/
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
struct receive_record_arg *rrd, blkptr_t *bp)
{
int err;
zio_t *io;
zbookmark_phys_t zb;
dnode_t *dn;
abd_t *abd = rrd->abd;
zio_cksum_t bp_cksum = bp->blk_cksum;
zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL;
if (rwa->raw)
flags |= ZIO_FLAG_RAW;
err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
if (err != 0)
return (err);
SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
dbuf_whichblock(dn, 0, drrw->drr_offset));
dnode_rele(dn, FTAG);
if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
/* Decompress the stream data */
abd_t *dabd = abd_alloc_linear(
drrw->drr_logical_size, B_FALSE);
err = zio_decompress_data(drrw->drr_compressiontype,
abd, abd_to_buf(dabd), abd_get_size(abd),
abd_get_size(dabd), NULL);
if (err != 0) {
abd_free(dabd);
return (err);
}
/* Swap in the newly decompressed data into the abd */
abd_free(abd);
abd = dabd;
}
if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/* Recompress the data */
abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
B_FALSE);
void *buf = abd_to_buf(cabd);
uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
abd, &buf, abd_get_size(abd),
rwa->os->os_complevel);
abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
/* Swap in newly compressed data into the abd */
abd_free(abd);
abd = cabd;
flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* The stream is not encrypted but the data on-disk is.
* We need to re-encrypt the buf using the same
* encryption type, salt, iv, and mac that was used to encrypt
* the block previously.
*/
if (!rwa->raw && BP_USES_CRYPT(bp)) {
dsl_dataset_t *ds;
dsl_crypto_key_t *dck = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
dsl_pool_t *dp = dmu_objset_pool(rwa->os);
abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
dsl_pool_config_enter(dp, FTAG);
err = dsl_dataset_hold_flags(dp, rwa->tofs,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
/* Look up the key from the spa's keystore */
err = spa_keystore_lookup_key(rwa->os->os_spa,
zb.zb_objset, FTAG, &dck);
if (err != 0) {
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
FTAG);
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
mac, abd_get_size(abd), abd, eabd, &no_crypt);
spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_pool_config_exit(dp, FTAG);
ASSERT0(no_crypt);
if (err != 0) {
abd_free(eabd);
return (err);
}
/* Swap in the newly encrypted data into the abd */
abd_free(abd);
abd = eabd;
/*
* We want to prevent zio_rewrite() from trying to
* encrypt the data again
*/
flags |= ZIO_FLAG_RAW_ENCRYPT;
}
rrd->abd = abd;
io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);
ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
abd_get_size(abd) == BP_GET_PSIZE(bp));
/* compute new bp checksum value and make sure it matches the old one */
zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
zio_destroy(io);
if (zfs_recv_best_effort_corrective != 0)
return (0);
return (SET_ERROR(ECKSUM));
}
/* Correct the corruption in place */
err = zio_wait(io);
if (err == 0) {
cr_cb_data_t *cb_data =
kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
cb_data->spa = rwa->os->os_spa;
cb_data->size = drrw->drr_logical_size;
cb_data->zb = zb;
/* Test if healing worked by re-reading the bp */
err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
drrw->drr_logical_size, corrective_read_done,
cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
}
if (err != 0 && zfs_recv_best_effort_corrective != 0)
err = 0;
return (err);
}
static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
int done = 0;
/*
* The code doesn't rely on this (lengths being multiples of 8). See
* comment in dump_bytes.
*/
ASSERT(len % 8 == 0 ||
(drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
while (done < len) {
ssize_t resid = len - done;
zfs_file_t *fp = drc->drc_fp;
int err = zfs_file_read(fp, (char *)buf + done,
len - done, &resid);
if (err == 0 && resid == len - done) {
/*
* Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
* that the receive was interrupted and can
* potentially be resumed.
*/
err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
}
drc->drc_voff += len - done - resid;
done = len - resid;
if (err != 0)
return (err);
}
drc->drc_bytes_read += len;
ASSERT3U(done, ==, len);
return (0);
}
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
if (bonus_type == DMU_OT_SA) {
return (1);
} else {
return (1 +
((DN_OLD_MAX_BONUSLEN -
MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
}
}
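/*
 * A worked example, assuming the legacy 512-byte dnode constants
 * (DN_OLD_MAX_BONUSLEN = 320, 128-byte block pointers, i.e.
 * SPA_BLKPTRSHIFT = 7): a 64-byte bonus buffer yields
 * 1 + ((320 - 64) >> 7) = 3 block pointers, a full 320-byte bonus
 * leaves exactly 1, and a DMU_OT_SA bonus always gets 1.
 */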
static void
save_resume_state(struct receive_writer_arg *rwa,
uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (!rwa->resumable)
return;
/*
* We use ds_resume_bytes[] != 0 to indicate that we need to
* update this on disk, so it must not be 0.
*/
ASSERT(rwa->bytes_read != 0);
/*
* We only resume from write records, which have a valid
* (non-meta-dnode) object number.
*/
ASSERT(object != 0);
/*
* For resuming to work correctly, we must receive records in order,
* sorted by object,offset. This is checked by the callers, but
* assert it here for good measure.
*/
ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
ASSERT3U(rwa->bytes_read, >=,
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
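/*
 * These in-core ds_resume_* values back the DS_FIELD_RESUME_OBJECT/
 * _OFFSET/_BYTES ZAP entries created in dmu_recv_begin_sync() above,
 * which in turn feed the receive_resume_token consumed by
 * `zfs send -t`.
 */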
static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
const void *new_bonus, boolean_t *samegenp)
{
zfs_file_info_t zoi;
int err;
dmu_buf_t *old_bonus_dbuf;
err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
if (err != 0)
return (err);
err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
&zoi);
dmu_buf_rele(old_bonus_dbuf, FTAG);
if (err != 0)
return (err);
uint64_t old_gen = zoi.zfi_generation;
err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
if (err != 0)
return (err);
uint64_t new_gen = zoi.zfi_generation;
*samegenp = (old_gen == new_gen);
return (0);
}
static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
const struct drr_object *drro, const dmu_object_info_t *doi,
const void *bonus_data,
uint64_t *object_to_hold, uint32_t *new_blksz)
{
uint32_t indblksz = drro->drr_indblkshift ?
1ULL << drro->drr_indblkshift : 0;
int nblkptr = deduce_nblkptr(drro->drr_bonustype,
drro->drr_bonuslen);
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
boolean_t do_free_range = B_FALSE;
int err;
*object_to_hold = drro->drr_object;
/* nblkptr should be bounded by the bonus size and type */
if (rwa->raw && nblkptr != drro->drr_nblkptr)
return (SET_ERROR(EINVAL));
/*
* After the previous send stream, the sending system may
* have freed this object, and then happened to re-allocate
* this object number in a later txg. In this case, we are
* receiving a different logical file, and the block size may
* appear to be different. i.e. we may have a different
* block size for this object than what the send stream says.
* In this case we need to remove the object's contents,
* so that its structure can be changed and then its contents
* entirely replaced by subsequent WRITE records.
*
* If this is a -L (--large-block) incremental stream, and
* the previous stream was not -L, the block size may appear
* to increase. i.e. we may have a smaller block size for
* this object than what the send stream says. In this case
* we need to keep the object's contents and block size
* intact, so that we don't lose parts of the object's
* contents that are not changed by this incremental send
* stream.
*
* We can distinguish between the two above cases by using
* the ZPL's generation number (see
* receive_object_is_same_generation()). However, we only
* want to rely on the generation number when absolutely
* necessary, because with raw receives, the generation is
* encrypted. We also want to minimize dependence on the
* ZPL, so that other types of datasets can also be received
* (e.g. ZVOLs, although note that ZVOLs currently do not
* reallocate their objects or change their structure).
* Therefore, we check a number of different cases where we
* know it is safe to discard the object's contents, before
* using the ZPL's generation number to make the above
* distinction.
*/
if (drro->drr_blksz != doi->doi_data_block_size) {
if (rwa->raw) {
/*
* RAW streams always have large blocks, so
* we are sure that the data is not needed
* due to changing --large-block to be on.
* Which is fortunate since the bonus buffer
* (which contains the ZPL generation) is
* encrypted, and the key might not be
* loaded.
*/
do_free_range = B_TRUE;
} else if (rwa->full) {
/*
* This is a full send stream, so it always
* replaces what we have. Even if the
* generation numbers happen to match, this
* can not actually be the same logical file.
* This is relevant when receiving a full
* send as a clone.
*/
do_free_range = B_TRUE;
} else if (drro->drr_type !=
DMU_OT_PLAIN_FILE_CONTENTS ||
doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
/*
* PLAIN_FILE_CONTENTS are the only type of
* objects that have ever been stored with
* large blocks, so we don't need the special
* logic below. ZAP blocks can shrink (when
* there's only one block), so we don't want
* to hit the error below about block size
* only increasing.
*/
do_free_range = B_TRUE;
} else if (doi->doi_max_offset <=
doi->doi_data_block_size) {
/*
* There is only one block. We can free it,
* because its contents will be replaced by a
 * WRITE record. This cannot be the no-L ->
* -L case, because the no-L case would have
* resulted in multiple blocks. If we
* supported -L -> no-L, it would not be safe
* to free the file's contents. Fortunately,
* that is not allowed (see
* recv_check_large_blocks()).
*/
do_free_range = B_TRUE;
} else {
boolean_t is_same_gen;
err = receive_object_is_same_generation(rwa->os,
drro->drr_object, doi->doi_bonus_type,
drro->drr_bonustype, bonus_data, &is_same_gen);
if (err != 0)
return (SET_ERROR(EINVAL));
if (is_same_gen) {
/*
* This is the same logical file, and
* the block size must be increasing.
* It could only decrease if
* --large-block was changed to be
* off, which is checked in
* recv_check_large_blocks().
*/
if (drro->drr_blksz <=
doi->doi_data_block_size)
return (SET_ERROR(EINVAL));
/*
* We keep the existing blocksize and
* contents.
*/
*new_blksz =
doi->doi_data_block_size;
} else {
do_free_range = B_TRUE;
}
}
}
/* nblkptr can only decrease if the object was reallocated */
if (nblkptr < doi->doi_nblkptr)
do_free_range = B_TRUE;
/* number of slots can only change on reallocation */
if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
do_free_range = B_TRUE;
/*
* For raw sends we also check a few other fields to
* ensure we are preserving the objset structure exactly
 * as it was on the send side:
* - A changed indirect block size
* - A smaller nlevels
*/
if (rwa->raw) {
if (indblksz != doi->doi_metadata_block_size)
do_free_range = B_TRUE;
if (drro->drr_nlevels < doi->doi_indirection)
do_free_range = B_TRUE;
}
if (do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
0, DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
/*
* The dmu does not currently support decreasing nlevels or changing
* indirect block size if there is already one, same as changing the
 * number of dnode slots on an object. For non-raw sends this
* does not matter and the new object can just use the previous one's
* parameters. For raw sends, however, the structure of the received
* dnode (including indirects and dnode slots) must match that of the
* send side. Therefore, instead of using dmu_object_reclaim(), we
* must free the object completely and call dmu_object_claim_dnsize()
* instead.
*/
if ((rwa->raw && ((doi->doi_indirection > 1 &&
indblksz != doi->doi_metadata_block_size) ||
drro->drr_nlevels < doi->doi_indirection)) ||
dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
err = dmu_free_long_object(rwa->os, drro->drr_object);
if (err != 0)
return (SET_ERROR(EINVAL));
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
*object_to_hold = DMU_NEW_OBJECT;
}
/*
* For raw receives, free everything beyond the new incoming
* maxblkid. Normally this would be done with a DRR_FREE
* record that would come after this DRR_OBJECT record is
* processed. However, for raw receives we manually set the
* maxblkid from the drr_maxblkid and so we must first free
* everything above that blkid to ensure the DMU is always
* consistent with itself. We will never free the first block
* of the object here because a maxblkid of 0 could indicate
* an object with a single block or one with no blocks. This
 * free may be skipped if dmu_free_long_range() was already called
 * above, since that call covers the entire object's contents.
*/
if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
(drro->drr_maxblkid + 1) * doi->doi_data_block_size,
DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
return (0);
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
int err;
uint32_t new_blksz = drro->drr_blksz;
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
if (drro->drr_type == DMU_OT_NONE ||
!DMU_OT_IS_VALID(drro->drr_type) ||
!DMU_OT_IS_VALID(drro->drr_bonustype) ||
drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
drro->drr_bonuslen >
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
dn_slots >
(spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
return (SET_ERROR(EINVAL));
}
if (rwa->raw) {
/*
* We should have received a DRR_OBJECT_RANGE record
* containing this block and stored it in rwa.
*/
if (drro->drr_object < rwa->or_firstobj ||
drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
drro->drr_raw_bonuslen < drro->drr_bonuslen ||
drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
drro->drr_nlevels > DN_MAX_LEVELS ||
drro->drr_nblkptr > DN_MAX_NBLKPTR ||
DN_SLOTS_TO_BONUSLEN(dn_slots) <
drro->drr_raw_bonuslen)
return (SET_ERROR(EINVAL));
} else {
/*
 * The DRR_OBJECT_SPILL flag is only valid when the DRR_BEGIN
 * record indicates support by setting DRR_FLAG_SPILL_BLOCK.
*/
if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
(!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
return (SET_ERROR(EINVAL));
}
if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
return (SET_ERROR(EINVAL));
}
}
err = dmu_object_info(rwa->os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT && err != EEXIST)
return (SET_ERROR(EINVAL));
if (drro->drr_object > rwa->max_object)
rwa->max_object = drro->drr_object;
/*
* If we are losing blkptrs or changing the block size this must
* be a new file instance. We must clear out the previous file
* contents before we can change this type of metadata in the dnode.
* Raw receives will also check that the indirect structure of the
* dnode hasn't changed.
*/
uint64_t object_to_hold;
if (err == 0) {
err = receive_handle_existing_object(rwa, drro, &doi, data,
&object_to_hold, &new_blksz);
if (err != 0)
return (err);
} else if (err == EEXIST) {
/*
* The object requested is currently an interior slot of a
* multi-slot dnode. This will be resolved when the next txg
* is synced out, since the send stream will have told us
* to free this slot when we freed the associated dnode
* earlier in the stream.
*/
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
return (SET_ERROR(EINVAL));
/* object was freed and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
} else {
/*
* If the only record in this range so far was DRR_FREEOBJECTS
* with at least one actually freed object, it's possible that
* the block will now be converted to a hole. We need to wait
* for the txg to sync to prevent races.
*/
if (rwa->or_need_sync == ORNS_YES)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
/* object is free and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
}
/* Only relevant for the first object in the range */
rwa->or_need_sync = ORNS_NO;
/*
* If this is a multi-slot dnode there is a chance that this
* object will expand into a slot that is already used by
* another object from the previous snapshot. We must free
* these objects before we attempt to allocate the new dnode.
*/
if (dn_slots > 1) {
boolean_t need_sync = B_FALSE;
for (uint64_t slot = drro->drr_object + 1;
slot < drro->drr_object + dn_slots;
slot++) {
dmu_object_info_t slot_doi;
err = dmu_object_info(rwa->os, slot, &slot_doi);
if (err == ENOENT || err == EEXIST)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, slot);
if (err != 0)
return (err);
need_sync = B_TRUE;
}
if (need_sync)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
}
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object_to_hold);
dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
if (object_to_hold == DMU_NEW_OBJECT) {
/* Currently free, wants to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
new_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* Currently allocated, but with different properties */
err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, rwa->spill ?
DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
/*
* Currently allocated, the existing version of this object
* may reference a spill block that is no longer allocated
* at the source and needs to be freed.
*/
err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
}
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
if (rwa->or_crypt_params_present) {
/*
* Set the crypt params for the buffer associated with this
* range of dnodes. This causes the blkptr_t to have the
* same crypt params (byteorder, salt, iv, mac) as on the
* sending side.
*
* Since we are committing this tx now, it is possible for
* the dnode block to end up on-disk with the incorrect MAC,
* if subsequent objects in this block are received in a
* different txg. However, since the dataset is marked as
* inconsistent, no code paths will do a non-raw read (or
* decrypt the block / verify the MAC). The receive code and
* scrub code can safely do raw reads and verify the
* checksum. They don't need to verify the MAC.
*/
dmu_buf_t *db = NULL;
uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
dmu_buf_set_crypt_params(db, rwa->or_byteorder,
rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
dmu_buf_rele(db, FTAG);
rwa->or_crypt_params_present = B_FALSE;
}
dmu_object_set_checksum(rwa->os, drro->drr_object,
drro->drr_checksumtype, tx);
dmu_object_set_compress(rwa->os, drro->drr_object,
drro->drr_compress, tx);
/* handle more restrictive dnode structuring for raw recvs */
if (rwa->raw) {
/*
 * Set the block size, indirect block shift, and nlevels.
 * This will not fail because we ensured all of the
 * blocks were freed earlier if this is a new object.
 * For non-new objects, the block size and indirect
 * block shift cannot change, and nlevels can only
 * increase.
*/
ASSERT3U(new_blksz, ==, drro->drr_blksz);
VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, drro->drr_indblkshift, tx));
VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
drro->drr_nlevels, tx));
/*
* Set the maxblkid. This will always succeed because
* we freed all blocks beyond the new maxblkid above.
*/
VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
drro->drr_maxblkid, tx));
}
if (data != NULL) {
dmu_buf_t *db;
dnode_t *dn;
uint32_t flags = DMU_READ_NO_PREFETCH;
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
* DRR_OBJECT_RANGE record.
*/
if (rwa->byteswap && !rwa->raw) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drro->drr_bonustype);
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
DRR_OBJECT_PAYLOAD_SIZE(drro));
}
dmu_buf_rele(db, FTAG);
dnode_rele(dn, FTAG);
}
dmu_tx_commit(tx);
return (0);
}
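/*
 * Free every allocated object in the range [drr_firstobj,
 * drr_firstobj + drr_numobjs), skipping holes and using
 * dmu_object_next() to jump over unallocated stretches.
 */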
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
{
uint64_t obj;
int next_err = 0;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
obj < DN_MAX_OBJECT && next_err == 0;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
dmu_object_info_t doi;
int err;
err = dmu_object_info(rwa->os, obj, &doi);
if (err == ENOENT)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, obj);
if (err != 0)
return (err);
if (rwa->or_need_sync == ORNS_MAYBE)
rwa->or_need_sync = ORNS_YES;
}
if (next_err != ESRCH)
return (next_err);
return (0);
}
/*
* Note: if this fails, the caller will clean up any records left on the
* rwa->write_batch list.
*/
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
dnode_t *dn;
int err;
if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
return (SET_ERROR(EINVAL));
struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
last_drrw->drr_offset - first_drrw->drr_offset +
last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
dnode_rele(dn, FTAG);
return (err);
}
struct receive_record_arg *rrd;
while ((rrd = list_head(&rwa->write_batch)) != NULL) {
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
abd_t *abd = rrd->abd;
ASSERT3U(drrw->drr_object, ==, rwa->last_object);
if (drrw->drr_logical_size != dn->dn_datablksz) {
/*
* The WRITE record is larger than the object's block
* size. We must be receiving an incremental
* large-block stream into a dataset that previously did
* a non-large-block receive. Lightweight writes must
* be exactly one block, so we need to decompress the
* data (if compressed) and do a normal dmu_write().
*/
ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
if (DRR_WRITE_COMPRESSED(drrw)) {
abd_t *decomp_abd =
abd_alloc_linear(drrw->drr_logical_size,
B_FALSE);
err = zio_decompress_data(
drrw->drr_compressiontype,
abd, abd_to_buf(decomp_abd),
abd_get_size(abd),
abd_get_size(decomp_abd), NULL);
if (err == 0) {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(decomp_abd), tx);
}
abd_free(decomp_abd);
} else {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(abd), tx);
}
if (err == 0)
abd_free(abd);
} else {
zio_prop_t zp = {0};
dmu_write_policy(rwa->os, dn, 0, 0, &zp);
zio_flag_t zio_flags = 0;
if (rwa->raw) {
zp.zp_encrypt = B_TRUE;
zp.zp_compress = drrw->drr_compressiontype;
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
memcpy(zp.zp_salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
memcpy(zp.zp_iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
memcpy(zp.zp_mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
zp.zp_copies = MIN(zp.zp_copies,
SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (DRR_WRITE_COMPRESSED(drrw)) {
ASSERT3U(drrw->drr_compressed_size, >, 0);
ASSERT3U(drrw->drr_logical_size, >=,
drrw->drr_compressed_size);
zp.zp_compress = drrw->drr_compressiontype;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
} else if (rwa->byteswap) {
/*
* Note: compressed blocks never need to be
* byteswapped, because WRITE records for
* metadata blocks are never compressed. The
* exception is raw streams, which are written
* in the original byteorder, and the byteorder
* bit is preserved in the BP by setting
* zp_byteorder above.
*/
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(
abd_to_buf(abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
/*
* Since this data can't be read until the receive
* completes, we can do a "lightweight" write for
* improved performance.
*/
err = dmu_lightweight_write_by_dnode(dn,
drrw->drr_offset, abd, &zp, zio_flags, tx);
}
if (err != 0) {
/*
* This rrd is left on the list, so the caller will
* free it (and the abd).
*/
break;
}
/*
* Note: If the receive fails, we want the resume stream to
* start with the same record that we last successfully
* received (as opposed to the next record), so that we can
* verify that we are resuming from the correct location.
*/
save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
list_remove(&rwa->write_batch, rrd);
kmem_free(rrd, sizeof (*rrd));
}
dmu_tx_commit(tx);
dnode_rele(dn, FTAG);
return (err);
}
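/*
 * Flush the pending write batch. If the flush (or a previous record)
 * failed, free every record still on rwa->write_batch along with its
 * payload.
 */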
noinline static int
flush_write_batch(struct receive_writer_arg *rwa)
{
if (list_is_empty(&rwa->write_batch))
return (0);
int err = rwa->err;
if (err == 0)
err = flush_write_batch_impl(rwa);
if (err != 0) {
struct receive_record_arg *rrd;
while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
abd_free(rrd->abd);
kmem_free(rrd, sizeof (*rrd));
}
}
ASSERT(list_is_empty(&rwa->write_batch));
return (err);
}
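/*
 * Handle a DRR_WRITE record. For healing receives, the payload is used
 * to rewrite the matching corrupted block in place. Otherwise the
 * record is appended to rwa->write_batch (after flushing the batch if
 * this write is for a different object or would exceed the batch size)
 * and EAGAIN is returned so the caller knows not to free the record.
 */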
noinline static int
receive_process_write_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err = 0;
ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (SET_ERROR(EINVAL));
if (rwa->heal) {
blkptr_t *bp;
dmu_buf_t *dbp;
dnode_t *dn;
int flags = DB_RF_CANFAIL;
if (rwa->raw)
flags |= DB_RF_NO_DECRYPT;
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
drrw->drr_offset, FTAG, &dbp);
if (err != 0)
return (err);
/* Try to read the object to see if it needs healing */
err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
/*
 * We only try to heal when dbuf_read() returns ECKSUM.
 * Other errors (even EIO) are returned to the caller.
* EIO indicates that the device is not present/accessible,
* so writing to it will likely fail.
* If the block is healthy, we don't want to overwrite it
* unnecessarily.
*/
if (err != ECKSUM) {
dmu_buf_rele(dbp, FTAG);
return (err);
}
dn = dmu_buf_dnode_enter(dbp);
/* Make sure the on-disk block and recv record sizes match */
if (drrw->drr_logical_size !=
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT) {
err = ENOTSUP;
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/* Get the block pointer for the corrupted block */
bp = dmu_buf_get_blkptr(dbp);
err = do_corrective_recv(rwa, drrw, rrd, bp);
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/*
* For resuming to work, records must be in increasing order
* by (object, offset).
*/
if (drrw->drr_object < rwa->last_object ||
(drrw->drr_object == rwa->last_object &&
drrw->drr_offset < rwa->last_offset)) {
return (SET_ERROR(EINVAL));
}
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
uint64_t batch_size =
MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
if (first_rrd != NULL &&
(drrw->drr_object != first_drrw->drr_object ||
drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
err = flush_write_batch(rwa);
if (err != 0)
return (err);
}
rwa->last_object = drrw->drr_object;
rwa->last_offset = drrw->drr_offset;
if (rwa->last_object > rwa->max_object)
rwa->max_object = rwa->last_object;
list_insert_tail(&rwa->write_batch, rrd);
/*
* Return EAGAIN to indicate that we will use this rrd again,
* so the caller should not free it
*/
return (EAGAIN);
}
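/*
 * Handle a DRR_WRITE_EMBEDDED record by writing the payload directly
 * into an embedded block pointer. Embedded data is never present in
 * raw streams.
 */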
static int
receive_write_embedded(struct receive_writer_arg *rwa,
struct drr_write_embedded *drrwe, void *data)
{
dmu_tx_t *tx;
int err;
if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
return (SET_ERROR(EINVAL));
if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
return (SET_ERROR(EINVAL));
if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
return (SET_ERROR(EINVAL));
if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
return (SET_ERROR(EINVAL));
if (rwa->raw)
return (SET_ERROR(EINVAL));
if (drrwe->drr_object > rwa->max_object)
rwa->max_object = drrwe->drr_object;
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrwe->drr_object,
drrwe->drr_offset, drrwe->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
dmu_write_embedded(rwa->os, drrwe->drr_object,
drrwe->drr_offset, data, drrwe->drr_etype,
drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
/* See comment in restore_write. */
save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
dmu_tx_commit(tx);
return (0);
}
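/*
 * Handle a DRR_SPILL record by replacing the object's spill block with
 * the received payload, resizing the spill dbuf first if its logical
 * size has changed.
 */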
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
abd_t *abd)
{
dmu_buf_t *db, *db_spill;
int err;
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
return (SET_ERROR(EINVAL));
/*
* This is an unmodified spill block which was added to the stream
* to resolve an issue with incorrectly removing spill blocks. It
* should be ignored by current versions of the code which support
* the DRR_FLAG_SPILL_BLOCK flag.
*/
if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
abd_free(abd);
return (0);
}
if (rwa->raw) {
if (!DMU_OT_IS_VALID(drrs->drr_type) ||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
drrs->drr_compressed_size == 0)
return (SET_ERROR(EINVAL));
}
if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrs->drr_object > rwa->max_object)
rwa->max_object = drrs->drr_object;
VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
&db_spill)) != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_abort(tx);
return (err);
}
/*
* Spill blocks may both grow and shrink. When a change in size
 * occurs, any existing dbuf must be updated to match the logical
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
- dmu_buf_will_fill(db_spill, tx);
+ dmu_buf_will_fill(db_spill, tx, B_FALSE);
VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}
arc_buf_t *abuf;
if (rwa->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
rwa->byteswap;
abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
drrs->drr_object, byteorder, drrs->drr_salt,
drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
drrs->drr_compressed_size, drrs->drr_length,
drrs->drr_compressiontype, 0);
} else {
abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
DMU_OT_IS_METADATA(drrs->drr_type),
drrs->drr_length);
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrs->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
DRR_SPILL_PAYLOAD_SIZE(drrs));
}
}
memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_commit(tx);
return (0);
}
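/*
 * Handle a DRR_FREE record by punching a hole of drr_length bytes at
 * drr_offset (or freeing to the end of the object when drr_length is
 * -1ULL).
 */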
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
int err;
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (SET_ERROR(EINVAL));
if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrf->drr_object > rwa->max_object)
rwa->max_object = drrf->drr_object;
err = dmu_free_long_range(rwa->os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
struct drr_object_range *drror)
{
/*
* By default, we assume this block is in our native format
* (ZFS_HOST_BYTEORDER). We then take into account whether
* the send stream is byteswapped (rwa->byteswap). Finally,
* we need to byteswap again if this particular block was
* in non-native format on the send side.
*/
boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
!!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
/*
 * Since dnode block sizes are constant, for now we do not need to
 * worry about ensuring that the dnode block size is the same on the
 * sending and receiving sides. For non-raw sends,
* this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
* record at all). Raw sends require this record type because the
* encryption parameters are used to protect an entire block of bonus
* buffers. If the size of dnode blocks ever becomes variable,
* handling will need to be added to ensure that dnode block sizes
 * match on the sending and receiving sides.
*/
if (drror->drr_numslots != DNODES_PER_BLOCK ||
P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
!rwa->raw)
return (SET_ERROR(EINVAL));
if (drror->drr_firstobj > rwa->max_object)
rwa->max_object = drror->drr_firstobj;
/*
* The DRR_OBJECT_RANGE handling must be deferred to receive_object()
* so that the block of dnodes is not written out when it's empty,
* and converted to a HOLE BP.
*/
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
rwa->or_need_sync = ORNS_MAYBE;
return (0);
}
/*
* Until we have the ability to redact large ranges of data efficiently, we
* process these records as frees.
*/
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
struct drr_free drrf = {0};
drrf.drr_length = drrr->drr_length;
drrf.drr_object = drrr->drr_object;
drrf.drr_offset = drrr->drr_offset;
drrf.drr_toguid = drrr->drr_toguid;
return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
dsl_dataset_t *ds = drc->drc_ds;
ds_hold_flags_t dsflags;
dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
/*
* Wait for the txg sync before cleaning up the receive. For
* resumable receives, this ensures that our resume state has
* been written out to disk. For raw receives, this ensures
* that the user accounting code will not attempt to do anything
* after we stopped receiving the dataset.
*/
txg_wait_synced(ds->ds_dir->dd_pool, 0);
ds->ds_objset->os_raw_receive = B_FALSE;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (drc->drc_resumable && drc->drc_should_save &&
!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
char name[ZFS_MAX_DATASET_NAME_LEN];
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_name(ds, name);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
if (!drc->drc_heal)
(void) dsl_destroy_head(name);
}
}
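/*
 * Fold a buffer into the stream's running fletcher-4 checksum, using
 * the byteswapped variant when the stream was generated on a system of
 * the opposite endianness.
 */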
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
if (drc->drc_byteswap) {
(void) fletcher_4_incremental_byteswap(buf, len,
&drc->drc_cksum);
} else {
(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
}
}
/*
* Read the payload into a buffer of size len, and update the current record's
* payload field.
* Allocate drc->drc_next_rrd and read the next record's header into
* drc->drc_next_rrd->header.
* Verify checksum of payload and next record.
*/
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
{
int err;
if (len != 0) {
ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
err = receive_read(drc, len, buf);
if (err != 0)
return (err);
receive_cksum(drc, len, buf);
/* note: rrd is NULL when reading the begin record's payload */
if (drc->drc_rrd != NULL) {
drc->drc_rrd->payload = buf;
drc->drc_rrd->payload_size = len;
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
ASSERT3P(buf, ==, NULL);
}
drc->drc_prev_cksum = drc->drc_cksum;
drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
&drc->drc_next_rrd->header);
drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (err);
}
if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(EINVAL));
}
/*
* Note: checksum is of everything up to but not including the
* checksum itself.
*/
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
receive_cksum(drc,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&drc->drc_next_rrd->header);
zio_cksum_t cksum_orig =
drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
zio_cksum_t *cksump =
&drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
if (drc->drc_byteswap)
byteswap_record(&drc->drc_next_rrd->header);
if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
!ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(ECKSUM));
}
receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
return (0);
}
/*
* Issue the prefetch reads for any necessary indirect blocks.
*
* We use the object ignore list to tell us whether or not to issue prefetches
* for a given object. We do this for both correctness (in case the blocksize
* of an object has changed) and performance (if the object doesn't exist, don't
* needlessly try to issue prefetches). We also trim the list as we go through
* the stream to prevent it from growing to an unbounded size.
*
* The object numbers within will always be in sorted order, and any write
* records we see will also be in sorted order, but they're not sorted with
* respect to each other (i.e. we can get several object records before
* receiving each object's write records). As a result, once we've reached a
* given object number, we can safely remove any reference to lower object
* numbers in the ignore list. In practice, we receive up to 32 object records
* before receiving write records, so the list can have up to 32 nodes in it.
*/
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
uint64_t length)
{
if (!objlist_exists(drc->drc_ignore_objlist, object)) {
dmu_prefetch(drc->drc_os, object, 1, offset, length,
ZIO_PRIORITY_SYNC_READ);
}
}
/*
* Read records off the stream, issuing any necessary prefetches.
*/
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
int err;
switch (drc->drc_rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro =
&drc->drc_rrd->header.drr_u.drr_object;
uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
void *buf = NULL;
dmu_object_info_t doi;
if (size != 0)
buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
/*
* See receive_read_prefetch for an explanation why we're
* storing this object in the ignore_obj_list.
*/
if (err == ENOENT || err == EEXIST ||
(err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
objlist_insert(drc->drc_ignore_objlist,
drro->drr_object);
err = 0;
}
return (err);
}
case DRR_FREEOBJECTS:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_WRITE:
{
struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0) {
abd_free(abd);
return (err);
}
drc->drc_rrd->abd = abd;
receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
drrw->drr_logical_size);
return (err);
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&drc->drc_rrd->header.drr_u.drr_write_embedded;
uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
void *buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
drrwe->drr_length);
return (err);
}
case DRR_FREE:
case DRR_REDACT:
{
/*
* It might be beneficial to prefetch indirect blocks here, but
* we don't really have the data to decide for sure.
*/
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_END:
{
struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
drre->drr_checksum))
return (SET_ERROR(ECKSUM));
return (0);
}
case DRR_SPILL:
{
struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0)
abd_free(abd);
else
drc->drc_rrd->abd = abd;
return (err);
}
case DRR_OBJECT_RANGE:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
default:
return (SET_ERROR(EINVAL));
}
}
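/*
 * Under ZFS_DEBUG, log the interesting fields of a record along with
 * the error it produced.
 */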
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
(u_longlong_t)drro->drr_object, drro->drr_type,
drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
(u_longlong_t)drrfo->drr_firstobj,
(u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
{
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
(u_longlong_t)drrw->drr_object, drrw->drr_type,
(u_longlong_t)drrw->drr_offset,
(u_longlong_t)drrw->drr_logical_size,
drrw->drr_checksumtype, drrw->drr_flags,
drrw->drr_compressiontype,
(u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
{
struct drr_write_byref *drrwbr =
&rrd->header.drr_u.drr_write_byref;
dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
(u_longlong_t)drrwbr->drr_object,
(u_longlong_t)drrwbr->drr_offset,
(u_longlong_t)drrwbr->drr_length,
(u_longlong_t)drrwbr->drr_toguid,
(u_longlong_t)drrwbr->drr_refguid,
(u_longlong_t)drrwbr->drr_refobject,
(u_longlong_t)drrwbr->drr_refoffset,
drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
(u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
(u_longlong_t)drrf->drr_object,
(u_longlong_t)drrf->drr_offset,
(longlong_t)drrf->drr_length,
err);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
"err = %d\n", (u_longlong_t)drrs->drr_object,
(u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
(u_longlong_t)drror->drr_firstobj,
(u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
default:
return;
}
#endif
}
/*
* Commit the records to the pool.
*/
static int
receive_process_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err;
/* Processing in order, therefore bytes_read should be increasing. */
ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
rwa->bytes_read = rrd->bytes_read;
/* We can only heal write records; other ones get ignored */
if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (0);
}
if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
err = flush_write_batch(rwa);
if (err != 0) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (err);
}
}
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
err = receive_object(rwa, drro, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
err = receive_freeobjects(rwa, drrfo);
break;
}
case DRR_WRITE:
{
err = receive_process_write_record(rwa, rrd);
if (rwa->heal) {
/*
 * If healing, always free the abd after processing.
*/
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (err != EAGAIN) {
/*
* On success, a non-healing
* receive_process_write_record() returns
* EAGAIN to indicate that we do not want to free
 * the rrd or its abd.
*/
ASSERT(err != 0);
abd_free(rrd->abd);
rrd->abd = NULL;
}
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
err = receive_write_embedded(rwa, drrwe, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
err = receive_free(rwa, drrf);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
err = receive_spill(rwa, drrs, rrd->abd);
if (err != 0)
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
err = receive_object_range(rwa, drror);
break;
}
case DRR_REDACT:
{
struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
err = receive_redact(rwa, drrr);
break;
}
default:
err = (SET_ERROR(EINVAL));
}
if (err != 0)
dprintf_drr(rrd, err);
return (err);
}
/*
* dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record. When we're done, signal the main thread and exit.
*/
static __attribute__((noreturn)) void
receive_writer_thread(void *arg)
{
struct receive_writer_arg *rwa = arg;
struct receive_record_arg *rrd;
fstrans_cookie_t cookie = spl_fstrans_mark();
for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
rrd = bqueue_dequeue(&rwa->q)) {
/*
* If there's an error, the main thread will stop putting things
* on the queue, but we need to clear everything in it before we
* can exit.
*/
int err = 0;
if (rwa->err == 0) {
err = receive_process_record(rwa, rrd);
} else if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
/*
* EAGAIN indicates that this record has been saved (on
 * rwa->write_batch), and will be used again, so we don't
* free it.
* When healing data we always need to free the record.
*/
if (err != EAGAIN || rwa->heal) {
if (rwa->err == 0)
rwa->err = err;
kmem_free(rrd, sizeof (*rrd));
}
}
kmem_free(rrd, sizeof (*rrd));
if (rwa->heal) {
zio_wait(rwa->heal_pio);
} else {
int err = flush_write_batch(rwa);
if (rwa->err == 0)
rwa->err = err;
}
mutex_enter(&rwa->mutex);
rwa->done = B_TRUE;
cv_signal(&rwa->cv);
mutex_exit(&rwa->mutex);
spl_fstrans_unmark(cookie);
thread_exit();
}
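/*
 * Verify that the (object, offset) resume point in the stream's BEGIN
 * payload matches the resume state recorded on disk for this dataset,
 * so a resumed stream can only continue exactly where the previous
 * attempt left off.
 */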
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
uint64_t val;
objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
uint64_t dsobj = dmu_objset_id(drc->drc_os);
uint64_t resume_obj, resume_off;
if (nvlist_lookup_uint64(begin_nvl,
"resume_object", &resume_obj) != 0 ||
nvlist_lookup_uint64(begin_nvl,
"resume_offset", &resume_off) != 0) {
return (SET_ERROR(EINVAL));
}
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
if (resume_obj != val)
return (SET_ERROR(EINVAL));
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
if (resume_off != val)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Read in the stream's records, one by one, and apply them to the pool. There
* are two threads involved; the thread that calls this function will spin up a
* worker thread, read the records off the stream one by one, and issue
* prefetches for any necessary indirect blocks. It will then push the records
* onto an internal blocking queue. The worker thread will pull the records off
* the queue, and actually write the data into the DMU. This way, the worker
* thread doesn't have to wait for reads to complete, since everything it needs
* (the indirect blocks) will be prefetched.
*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
int err = 0;
struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
uint64_t bytes = 0;
(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
sizeof (bytes), 1, &bytes);
drc->drc_bytes_read += bytes;
}
drc->drc_ignore_objlist = objlist_create();
/* these were verified in dmu_recv_begin */
ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
DMU_SUBSTREAM);
ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
ASSERT0(drc->drc_os->os_encrypted &&
(drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
/* handle DSL encryption key payload */
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
nvlist_t *keynvl = NULL;
ASSERT(drc->drc_os->os_encrypted);
ASSERT(drc->drc_raw);
err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
&keynvl);
if (err != 0)
goto out;
if (!drc->drc_heal) {
/*
* If this is a new dataset we set the key immediately.
* Otherwise we don't want to change the key until we
* are sure the rest of the receive succeeded so we
* stash the keynvl away until then.
*/
err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
drc->drc_ds->ds_object, drc->drc_fromsnapobj,
drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
if (err != 0)
goto out;
}
/* see comment in dmu_recv_end_sync() */
drc->drc_ivset_guid = 0;
(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
&drc->drc_ivset_guid);
if (!drc->drc_newfs)
drc->drc_keynvl = fnvlist_dup(keynvl);
}
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = resume_check(drc, drc->drc_begin_nvl);
if (err != 0)
goto out;
}
/*
* For compatibility with recursive send streams, we do this here,
* rather than in dmu_recv_begin. If we pull the next header too
* early, and it's the END record, we break the `recv_skip` logic.
*/
if (drc->drc_drr_begin->drr_payloadlen == 0) {
err = receive_read_payload_and_next_header(drc, 0, NULL);
if (err != 0)
goto out;
}
/*
* If we failed before this point we will clean up any new resume
* state that was created. Now that we've gotten past the initial
* checks we are ok to retain that resume state.
*/
drc->drc_should_save = B_TRUE;
(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
offsetof(struct receive_record_arg, node));
cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
rwa->os = drc->drc_os;
rwa->byteswap = drc->drc_byteswap;
rwa->heal = drc->drc_heal;
rwa->tofs = drc->drc_tofs;
rwa->resumable = drc->drc_resumable;
rwa->raw = drc->drc_raw;
rwa->spill = drc->drc_spill;
rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
rwa->os->os_raw_receive = drc->drc_raw;
if (drc->drc_heal) {
rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
ZIO_FLAG_GODFATHER);
}
list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
offsetof(struct receive_record_arg, node.bqn_node));
(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
TS_RUN, minclsyspri);
/*
* We're reading rwa->err without locks, which is safe since we are the
* only reader, and the worker thread is the only writer. It's ok if we
* miss a write for an iteration or two of the loop, since the writer
* thread will keep freeing records we send it until we send it an eos
* marker.
*
* We can leave this loop in 3 ways: First, if rwa->err is
* non-zero. In that case, the writer thread will free the rrd we just
* pushed. Second, if we're interrupted; in that case, either it's the
* first loop and drc->drc_rrd was never allocated, or it's later, and
* drc->drc_rrd has been handed off to the writer thread who will free
* it. Finally, if receive_read_record fails or we're at the end of the
* stream, then we free drc->drc_rrd and exit.
*/
while (rwa->err == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = SET_ERROR(EINTR);
break;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
err = receive_read_record(drc);
if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
drc->drc_rrd = NULL;
break;
}
bqueue_enqueue(&rwa->q, drc->drc_rrd,
sizeof (struct receive_record_arg) +
drc->drc_rrd->payload_size);
drc->drc_rrd = NULL;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
mutex_enter(&rwa->mutex);
while (!rwa->done) {
/*
* We need to use cv_wait_sig() so that any process that may
* be sleeping here can still fork.
*/
(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
}
mutex_exit(&rwa->mutex);
/*
* If we are receiving a full stream as a clone, all object IDs which
* are greater than the maximum ID referenced in the stream are
* by definition unused and must be freed.
*/
if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
uint64_t obj = rwa->max_object + 1;
int free_err = 0;
int next_err = 0;
while (next_err == 0) {
free_err = dmu_free_long_object(rwa->os, obj);
if (free_err != 0 && free_err != ENOENT)
break;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
}
if (err == 0) {
if (free_err != 0 && free_err != ENOENT)
err = free_err;
else if (next_err != ESRCH)
err = next_err;
}
}
cv_destroy(&rwa->cv);
mutex_destroy(&rwa->mutex);
bqueue_destroy(&rwa->q);
list_destroy(&rwa->write_batch);
if (err == 0)
err = rwa->err;
out:
/*
 * If we hit an error before we started the receive_writer_thread,
 * we need to clean up the next_rrd we created by processing the
* DRR_BEGIN record.
*/
if (drc->drc_next_rrd != NULL)
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
/*
* The objset will be invalidated by dmu_recv_end() when we do
* dsl_dataset_clone_swap_sync_impl().
*/
drc->drc_os = NULL;
kmem_free(rwa, sizeof (*rwa));
nvlist_free(drc->drc_begin_nvl);
if (err != 0) {
/*
* Clean up references. If receive is not resumable,
* destroy what we created, so we don't leave it in
* the inconsistent state.
*/
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
}
objlist_destroy(drc->drc_ignore_objlist);
drc->drc_ignore_objlist = NULL;
*voffp = drc->drc_voff;
return (err);
}
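/*
 * Sync-task check function for finishing a receive: verify that the
 * received dataset can be snapshotted and, when receiving into an
 * existing filesystem, that the clone swap (including any forced
 * snapshot destruction and raw key change) is legal.
 */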
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error;
ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
if (drc->drc_heal) {
error = 0;
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
if (error != 0)
return (error);
if (drc->drc_force) {
/*
* We will destroy any snapshots in tofs (i.e. before
* origin_head) that are after the origin (which is
* the snap before drc_ds, because drc_ds can not
* have any snaps of its own).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
break;
if (snap->ds_dir != origin_head->ds_dir)
error = SET_ERROR(EINVAL);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(
snap, B_FALSE);
}
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
if (error != 0)
break;
}
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
if (drc->drc_keynvl != NULL) {
error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
drc->drc_keynvl, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
origin_head, drc->drc_force, drc->drc_owner, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
error = dsl_dataset_snapshot_check_impl(origin_head,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
dsl_dataset_rele(origin_head, FTAG);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
} else {
error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
}
return (error);
}
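/*
 * Sync-task sync function for finishing a receive: swap the received
 * clone into place (for existing filesystems), snapshot the result,
 * clear DS_FLAG_INCONSISTENT, remove any resume state, and release the
 * dataset hold taken in dmu_recv_begin.
 */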
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
uint64_t newsnapobj = 0;
spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
tx, "snap=%s", drc->drc_tosnap);
drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
if (drc->drc_heal) {
if (drc->drc_keynvl != NULL) {
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
&origin_head));
if (drc->drc_force) {
/*
* Destroy any snapshots of drc_tofs (origin_head)
* after the origin (the snap before drc_ds).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
&snap));
ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_destroy_snapshot_sync_impl(snap,
B_FALSE, tx);
dsl_dataset_rele(snap, FTAG);
}
}
if (drc->drc_keynvl != NULL) {
dsl_crypto_recv_raw_key_sync(drc->drc_ds,
drc->drc_keynvl, tx);
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
VERIFY3P(drc->drc_ds->ds_prev, ==,
origin_head->ds_prev);
dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
origin_head, tx);
/*
* The objset was evicted by dsl_dataset_clone_swap_sync_impl,
* so drc_os is no longer valid.
*/
drc->drc_os = NULL;
dsl_dataset_snapshot_sync_impl(origin_head,
drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
dsl_dataset_phys(origin_head)->ds_flags &=
~DS_FLAG_INCONSISTENT;
newsnapobj =
dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
dsl_dataset_rele(origin_head, FTAG);
dsl_destroy_head_sync_impl(drc->drc_ds, tx);
if (drc->drc_owner != NULL)
VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
} else {
dsl_dataset_t *ds = drc->drc_ds;
dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(ds->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(ds->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
if (dsl_dataset_has_resume_receive_state(ds)) {
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
}
newsnapobj =
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
}
/*
* If this is a raw receive, the crypt_keydata nvlist will include
* a to_ivset_guid for us to set on the new snapshot. This value
* will override the value generated by the snapshot code. However,
* this value may not be present, because older implementations of
* the raw send code did not include this value, and we are still
* allowed to receive them if the zfs_disable_ivset_guid_check
* tunable is set, in which case we will leave the newly-generated
* value.
*/
if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&drc->drc_ivset_guid, tx));
}
/*
* Release the hold from dmu_recv_begin. This must be done before
* we return to open context, so that when we free the dataset's dnode
* we can evict its bonus buffer. Since the dataset may be destroyed
* at this point (and therefore won't have a valid pointer to the spa)
* we release the key mapping manually here while we do have a valid
* pointer, if it exists.
*/
if (!drc->drc_raw && encrypted) {
(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
drc->drc_ds->ds_object, drc->drc_ds);
}
dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
drc->drc_ds = NULL;
}
static int dmu_recv_end_modified_blocks = 3;
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
/*
* We will be destroying the ds; make sure its origin is unmounted if
* necessary.
*/
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(drc->drc_ds, name);
zfs_destroy_unmount_origin(name);
#endif
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
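/*
 * Finish a receive by running the appropriate end sync task. On
 * success, create zvol minors for the new filesystem (if any) and its
 * snapshot; on failure, clean up the incomplete dataset (resume state
 * is preserved for resumable receives).
 */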
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
int error;
drc->drc_owner = owner;
if (drc->drc_newfs)
error = dmu_recv_new_end(drc);
else
error = dmu_recv_existing_end(drc);
if (error != 0) {
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
} else if (!drc->drc_heal) {
if (drc->drc_newfs) {
zvol_create_minor(drc->drc_tofs);
}
char *snapname = kmem_asprintf("%s@%s",
drc->drc_tofs, drc->drc_tosnap);
zvol_create_minor(snapname);
kmem_strfree(snapname);
}
return (error);
}
/*
* Return TRUE if this objset is currently being received into.
*/
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
return (os->os_dsl_dataset != NULL &&
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
"Maximum receive queue length");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
"Receive queue fill fraction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
"Maximum amount of writes to batch into one transaction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
"Ignore errors during corrective receive");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index 2d37ed2cdfb5..37c68528bf95 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -1,3124 +1,3122 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
static int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
* completed, this variable might need to be increased. If instead the main
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
* this variable may need to be increased. If there is a significant slowdown
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues used by zfs send.
* The fill fraction controls the frequency with which threads have to be
* cv_signaled. If a lot of CPU time is being spent on cv_signal, then these
* should be tuned down. If the queues empty before the signaled thread can
* catch up, then these should be tuned up.
*/
static uint_t zfs_send_queue_ff = 20;
static uint_t zfs_send_no_prefetch_queue_ff = 20;
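/*
* For example, with the default zfs_send_no_prefetch_queue_length of 1 MiB
* and a fill fraction of 20, wakeups happen in increments of roughly 1/20th
* of the queue (about 52 KiB). This is an illustrative reading that assumes
* the fill fraction acts as a divisor of the queue size; the exact wakeup
* policy is implemented by the bqueue code.
*/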
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
static uint_t zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
static int zfs_send_unmodified_spill_blocks = B_TRUE;
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
uint64_t temp = a * b;
if (b != 0 && temp / b != a)
return (B_FALSE);
*c = temp;
return (B_TRUE);
}
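/*
* Example usage, mirroring the HOLE handling in do_dump() below: a byte
* offset is derived from a block id and a block size, and an overflow means
* no record needs to be sent (the names here are generic):
*
*	uint64_t offset;
*	if (!overflow_multiply(start_blkid, datablksz, &offset))
*		return (0);
*/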
struct send_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
uint64_t *num_blocks_visited;
};
struct redact_list_thread_arg {
boolean_t cancel;
bqueue_t q;
zbookmark_phys_t resume;
redaction_list_t *rl;
boolean_t mark_redact;
int error_code;
uint64_t *num_blocks_visited;
};
struct send_merge_thread_arg {
bqueue_t q;
objset_t *os;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *redact_arg;
int error;
boolean_t cancel;
};
struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t object;
uint64_t start_blkid;
uint64_t end_blkid;
bqueue_node_t ln;
enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
PREVIOUSLY_REDACTED} type;
union {
struct srd {
dmu_object_type_t obj_type;
uint32_t datablksz; // logical size
uint32_t datasz; // payload size
blkptr_t bp;
arc_buf_t *abuf;
abd_t *abd;
kmutex_t lock;
kcondvar_t cv;
boolean_t io_outstanding;
boolean_t io_compressed;
int io_err;
} data;
struct srh {
uint32_t datablksz;
} hole;
struct sro {
/*
* This is a pointer because embedding it in the
* struct causes these structures to be massively larger
* for all range types; this makes the code much less
* memory efficient.
*/
dnode_phys_t *dnp;
blkptr_t bp;
} object;
struct srr {
uint32_t datablksz;
} redact;
struct sror {
blkptr_t bp;
} object_range;
} sru;
};
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free(),
* dump_freeobjects(), and dump_redact() can be aggregated into a single
* DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
*/
typedef enum {
PENDING_NONE,
PENDING_FREE,
PENDING_FREEOBJECTS,
PENDING_REDACT
} dmu_pendop_t;
typedef struct dmu_send_cookie {
dmu_replay_record_t *dsc_drr;
dmu_send_outparams_t *dsc_dso;
offset_t *dsc_off;
objset_t *dsc_os;
zio_cksum_t dsc_zc;
uint64_t dsc_toguid;
uint64_t dsc_fromtxg;
int dsc_err;
dmu_pendop_t dsc_pending_op;
uint64_t dsc_featureflags;
uint64_t dsc_last_data_object;
uint64_t dsc_last_data_offset;
uint64_t dsc_resume_object;
uint64_t dsc_resume_offset;
boolean_t dsc_sent_begin;
boolean_t dsc_sent_end;
} dmu_send_cookie_t;
static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
static void
range_free(struct send_range *range)
{
if (range->type == OBJECT) {
size_t size = sizeof (dnode_phys_t) *
(range->sru.object.dnp->dn_extra_slots + 1);
kmem_free(range->sru.object.dnp, size);
} else if (range->type == DATA) {
mutex_enter(&range->sru.data.lock);
while (range->sru.data.io_outstanding)
cv_wait(&range->sru.data.cv, &range->sru.data.lock);
if (range->sru.data.abd != NULL)
abd_free(range->sru.data.abd);
if (range->sru.data.abuf != NULL) {
arc_buf_destroy(range->sru.data.abuf,
&range->sru.data.abuf);
}
mutex_exit(&range->sru.data.lock);
cv_destroy(&range->sru.data.cv);
mutex_destroy(&range->sru.data.lock);
}
kmem_free(range, sizeof (*range));
}
/*
* For all record types except BEGIN, fill in the checksum (overlaid in
* drr_u.drr_checksum.drr_checksum). The checksum verifies everything
* up to the start of the checksum itself.
*/
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
(void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&dscp->dsc_zc);
if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
dscp->dsc_sent_begin = B_TRUE;
} else {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
if (dscp->dsc_drr->drr_type == DRR_END) {
dscp->dsc_sent_end = B_TRUE;
}
(void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), &dscp->dsc_zc);
*dscp->dsc_off += sizeof (dmu_replay_record_t);
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
sizeof (dmu_replay_record_t), dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
*dscp->dsc_off += payload_len;
/*
* payload is null when dso_dryrun == B_TRUE (i.e. when we're
* doing a send size calculation)
*/
if (payload != NULL) {
(void) fletcher_4_incremental_native(
payload, payload_len, &dscp->dsc_zc);
}
/*
* The code does not rely on this (len being a multiple of 8).
* We keep this assertion because of the corresponding assertion
* in receive_read(). Keeping this assertion ensures that we do
* not inadvertently break backwards compatibility (causing the
* assertion in receive_read() to trigger on old software).
*
* Raw sends cannot be received on old software, and so can
* bypass this assertion.
*/
ASSERT((payload_len % 8 == 0) ||
(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
payload_len, dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
}
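/*
* The checksum in dsc_zc is cumulative: each non-BEGIN record carries a
* fletcher4 checksum of the entire stream so far, up to the start of its own
* drr_checksum field. A receiver can verify incrementally with the same
* primitive (a minimal sketch, ignoring byteswapped streams):
*
*	zio_cksum_t zc = { { 0 } };
*	(void) fletcher_4_incremental_native(drr,
*	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
*	    &zc);
*	... compare zc with drr->drr_u.drr_checksum.drr_checksum ...
*	(void) fletcher_4_incremental_native(
*	    &drr->drr_u.drr_checksum.drr_checksum, sizeof (zio_cksum_t), &zc);
*/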
/*
* Fill in the drr_free struct, or perform aggregation if the previous record is
* also a free record, and the two are adjacent.
*
* Note that we send free records even for a full send, because we want to be
* able to receive a full send as a clone, which requires a list of all the free
* and freeobject records that were generated on the source.
*/
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* that the receiving system doesn't have any dbufs in the range
* being freed. This is always true because there is a one-record
* constraint: we only send one WRITE record for any given
* object,offset. We know that the one-record constraint is
* true because we always send data in increasing order by
* object,offset.
*
* If the increasing-order constraint ever changes, we should find
* another way to assert that the one-record constraint is still
* satisfied.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
if (offset + length < offset || length == UINT64_MAX)
drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
dscp->dsc_pending_op = PENDING_FREE;
}
return (0);
}
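/*
* Worked example of the aggregation above: with a pending DRR_FREE covering
* (object 5, offset 0, length 0x20000), a subsequent call
* dump_free(dscp, 5, 0x20000, 0x20000) is adjacent and simply grows the
* pending record to length 0x40000. A later call at offset 0x100000 is not
* a continuation, so the pending record is pushed out first and a new
* pending FREE is started.
*/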
/*
* Fill in the drr_redact struct, or perform aggregation if the previous record
* is also a redaction record, and the two are adjacent.
*/
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
/*
* If there is a pending op, but it's not PENDING_REDACT, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_REDACT records can only be aggregated with
* other DRR_REDACT records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_REDACT) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_REDACT) {
/*
* Check to see whether this redacted block can be aggregated
* with pending one.
*/
if (drrr->drr_object == object && drrr->drr_offset +
drrr->drr_length == offset) {
drrr->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a REDACT record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
drrr->drr_length = length;
drrr->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_REDACT;
return (0);
}
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
uint64_t offset, int lsize, int psize, const blkptr_t *bp,
boolean_t io_compressed, void *data)
{
uint64_t payload_size;
boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
dscp->dsc_last_data_object = object;
dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
boolean_t compressed =
(bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
io_compressed : lsize != psize);
if (raw || compressed) {
ASSERT(bp != NULL);
ASSERT(raw || dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
if (raw) {
ASSERT(BP_IS_PROTECTED(bp));
/*
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
drrw->drr_iv);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
ASSERT3S(lsize, >=, psize);
}
/* set fields common to compressed and raw sends */
drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
drrw->drr_compressed_size = psize;
payload_size = drrw->drr_compressed_size;
} else {
payload_size = drrw->drr_logical_size;
}
if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
* plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
} else {
drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
ZCHECKSUM_FLAG_DEDUP)
drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
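/*
* To summarize the payload-size selection above: raw and compressed sends
* transmit psize bytes (the on-disk size) while plain sends transmit lsize
* bytes (the logical size). For example, a 128K block compressed to 12K is
* sent as a 12K payload with drr_compressed_size == 12K when the stream
* carries DMU_BACKUP_FEATURE_COMPRESSED and the data was read compressed,
* and as a 128K payload otherwise.
*/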
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
&(dscp->dsc_drr->drr_u.drr_write_embedded);
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
drrw->drr_psize = BPE_GET_PSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
uint32_t psize = drrw->drr_psize;
uint32_t rsize = P2ROUNDUP(psize, 8);
if (psize != rsize)
memset(buf + psize, 0, rsize - psize);
if (dump_record(dscp, buf, rsize) != 0)
return (SET_ERROR(EINTR));
return (0);
}
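/*
* The payload of a DRR_WRITE_EMBEDDED record is padded to an 8-byte
* boundary, matching the assertion in dump_record(). For example, a 13-byte
* embedded payload (psize == 13) is rounded up to rsize == 16 and the
* trailing 3 bytes are zeroed before being checksummed and written.
*/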
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
void *data)
{
struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
drrs->drr_toguid = dscp->dsc_toguid;
/* See comment in dump_dnode() for full details */
if (zfs_send_unmodified_spill_blocks &&
(bp->blk_birth <= dscp->dsc_fromtxg)) {
drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
}
/* handle raw send fields */
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
drrs->drr_compressed_size = BP_GET_PSIZE(bp);
zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
payload_size = drrs->drr_compressed_size;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
(DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
* leading to zfs recv never completing. To avoid this issue, don't
* send FREEOBJECTS records for object IDs which cannot exist on the
* receiving side.
*/
if (maxobj > 0) {
if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
numobjs = maxobj - firstobj;
}
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with pending one
*/
if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
drrfo->drr_numobjs += numobjs;
return (0);
} else {
/* can't be aggregated. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
drrfo->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
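/*
* Worked example of the maxobj clamp above, assuming 512-byte dnodes
* (DNODES_PER_BLOCK == 32): if the meta dnode's dn_maxblkid is 1, then
* maxobj == 64, so dump_freeobjects(dscp, 100, 10) sends nothing and
* dump_freeobjects(dscp, 50, 100) is clamped to 14 objects.
*/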
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* this case it's unnecessary to send the dnodes prior to
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
return (dump_freeobjects(dscp, object, 1));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
drro->drr_toguid = dscp->dsc_toguid;
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;
/* needed for reconstructing dnp on recv side */
drro->drr_maxblkid = dnp->dn_maxblkid;
drro->drr_indblkshift = dnp->dn_indblkshift;
drro->drr_nlevels = dnp->dn_nlevels;
drro->drr_nblkptr = dnp->dn_nblkptr;
/*
* Since we encrypt the entire bonus area, the (raw) part
* beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
return (SET_ERROR(EINVAL));
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
/*
* DRR_OBJECT_SPILL is set for every dnode which references a
* spill block. This allows the receiving pool to definitively
* determine when a spill block should be kept or freed.
*/
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
drro->drr_flags |= DRR_OBJECT_SPILL;
if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
/*
* Send DRR_SPILL records for unmodified spill blocks. This is useful
* because changing certain attributes of the object (e.g. blocksize)
* can cause old versions of ZFS to incorrectly remove a spill block.
* Including these records in the stream forces an up-to-date version
* to always be written, ensuring they're never lost. Current versions
* of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
* ignore these unmodified spill blocks.
*/
if (zfs_send_unmodified_spill_blocks &&
(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
(DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
memset(&record, 0, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
record.start_blkid = DMU_SPILL_BLKID;
record.end_blkid = record.start_blkid + 1;
record.sru.data.bp = *bp;
record.sru.data.obj_type = dnp->dn_type;
record.sru.data.datablksz = BP_GET_LSIZE(bp);
if (do_dump(dscp, &record) != 0)
return (SET_ERROR(EINTR));
}
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
&(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
/*
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
!(featureflags & DMU_BACKUP_FEATURE_LZ4)))
return (B_FALSE);
/*
* If we have not set the ZSTD feature flag, we can't send ZSTD
* compressed embedded blocks, as the receiver may not support them.
*/
if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
!(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
* Embed type must be explicitly enabled.
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
return (B_FALSE);
}
return (B_FALSE);
}
/*
* This function actually handles figuring out what kind of record needs to be
* dumped, and calling the appropriate helper function. In most cases,
* the data has already been read by send_reader_thread().
*/
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
int err = 0;
switch (range->type) {
case OBJECT:
err = dump_dnode(dscp, &range->sru.object.bp, range->object,
range->sru.object.dnp);
return (err);
case OBJECT_RANGE: {
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
return (0);
}
uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
DNODE_SHIFT;
uint64_t firstobj = range->start_blkid * epb;
err = dump_object_range(dscp, &range->sru.object_range.bp,
firstobj, epb);
break;
}
case REDACT: {
struct srr *srrp = &range->sru.redact;
err = dump_redact(dscp, range->object, range->start_blkid *
srrp->datablksz, (range->end_blkid - range->start_blkid) *
srrp->datablksz);
return (err);
}
case DATA: {
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
spa_t *spa =
dmu_objset_spa(dscp->dsc_os);
ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (BP_GET_TYPE(bp) == DMU_OT_SA) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
zioflags |= ZIO_FLAG_RAW;
}
zbookmark_phys_t zb;
ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
zb.zb_objset = dmu_objset_id(dscp->dsc_os);
zb.zb_object = range->object;
zb.zb_level = 0;
zb.zb_blkid = range->start_blkid;
arc_buf_t *abuf = NULL;
if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb) != 0)
return (SET_ERROR(EIO));
err = dump_spill(dscp, bp, zb.zb_object,
(abuf == NULL ? NULL : abuf->b_data));
if (abuf != NULL)
arc_buf_destroy(abuf, &abuf);
return (err);
}
if (send_do_embed(bp, dscp->dsc_featureflags)) {
err = dump_write_embedded(dscp, range->object,
range->start_blkid * srdp->datablksz,
srdp->datablksz, bp);
return (err);
}
ASSERT(range->object > dscp->dsc_resume_object ||
(range->object == dscp->dsc_resume_object &&
range->start_blkid * srdp->datablksz >=
dscp->dsc_resume_offset));
/* it's a level-0 block of a regular object */
mutex_enter(&srdp->lock);
while (srdp->io_outstanding)
cv_wait(&srdp->cv, &srdp->lock);
err = srdp->io_err;
mutex_exit(&srdp->lock);
if (err != 0) {
if (zfs_send_corrupt_data &&
!dscp->dsc_dso->dso_dryrun) {
/*
* Send a block filled with 0x"zfs badd bloc"
*/
srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
ARC_BUFC_DATA, srdp->datablksz);
uint64_t *ptr;
for (ptr = srdp->abuf->b_data;
(char *)ptr < (char *)srdp->abuf->b_data +
srdp->datablksz; ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (SET_ERROR(EIO));
}
}
ASSERT(dscp->dsc_dso->dso_dryrun ||
srdp->abuf != NULL || srdp->abd != NULL);
uint64_t offset = range->start_blkid * srdp->datablksz;
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
ASSERT3P(srdp->abuf, ==, NULL);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
while (srdp->datablksz > 0 && err == 0) {
int n = MIN(srdp->datablksz,
SPA_OLD_MAXBLOCKSIZE);
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset, n, n, NULL, B_FALSE,
data);
offset += n;
/*
* When doing dry run, data==NULL is used as a
* sentinel value by
* dmu_dump_write()->dump_record().
*/
if (data != NULL)
data += n;
srdp->datablksz -= n;
}
} else {
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset,
srdp->datablksz, srdp->datasz, bp,
srdp->io_compressed, data);
}
return (err);
}
case HOLE: {
struct srh *srhp = &range->sru.hole;
if (range->object == DMU_META_DNODE_OBJECT) {
uint32_t span = srhp->datablksz >> DNODE_SHIFT;
uint64_t first_obj = range->start_blkid * span;
uint64_t numobj = range->end_blkid * span - first_obj;
return (dump_freeobjects(dscp, first_obj, numobj));
}
uint64_t offset = 0;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it must still be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(range->start_blkid, srhp->datablksz,
&offset)) {
return (0);
}
uint64_t len = 0;
if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
len = UINT64_MAX;
len = len - offset;
return (dump_free(dscp, range->object, offset, len));
}
default:
panic("Invalid range type in do_dump: %d", range->type);
}
return (err);
}
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
uint64_t end_blkid, boolean_t eos)
{
struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
range->type = type;
range->object = object;
range->start_blkid = start_blkid;
range->end_blkid = end_blkid;
range->eos_marker = eos;
if (type == DATA) {
range->sru.data.abd = NULL;
range->sru.data.abuf = NULL;
mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
range->sru.data.io_outstanding = 0;
range->sru.data.io_err = 0;
range->sru.data.io_compressed = B_FALSE;
}
return (range);
}
/*
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
(void) zilog;
struct send_thread_arg *sta = arg;
struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
spa_log_error(spa, zb, &bp->blk_birth);
- zfs_panic_recover("unencrypted block in encrypted "
- "object set %llu", dmu_objset_id(sta->os));
return (SET_ERROR(EIO));
}
if (sta->cancel)
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
atomic_inc_64(sta->num_blocks_visited);
if (zb->zb_level == ZB_DNODE_LEVEL) {
if (zb->zb_object == DMU_META_DNODE_OBJECT)
return (0);
record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
memcpy(record->sru.object.dnp, dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
!BP_IS_HOLE(bp)) {
record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
zb->zb_blkid + 1, B_FALSE);
record->sru.object_range.bp = *bp;
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
return (0);
if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
uint64_t start;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it must still be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
span * zb->zb_blkid > dnp->dn_maxblkid)) {
ASSERT(BP_IS_HOLE(bp));
return (0);
}
if (zb->zb_blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
enum type record_type = DATA;
if (BP_IS_HOLE(bp))
record_type = HOLE;
else if (BP_IS_REDACTED(bp))
record_type = REDACT;
else
record_type = DATA;
record = range_alloc(record_type, zb->zb_object, start,
(start + span < start ? 0 : start + span), B_FALSE);
uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
if (BP_IS_HOLE(bp)) {
record->sru.hole.datablksz = datablksz;
} else if (BP_IS_REDACTED(bp)) {
record->sru.redact.datablksz = datablksz;
} else {
record->sru.data.datablksz = datablksz;
record->sru.data.obj_type = dnp->dn_type;
record->sru.data.bp = *bp;
}
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
struct redact_list_cb_arg {
uint64_t *num_blocks_visited;
bqueue_t *q;
boolean_t *cancel;
boolean_t mark_redact;
};
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
struct redact_list_cb_arg *rlcap = arg;
atomic_inc_64(rlcap->num_blocks_visited);
if (*rlcap->cancel)
return (-1);
struct send_range *data = range_alloc(REDACT, rb->rbp_object,
rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
if (rlcap->mark_redact) {
data->type = REDACT;
data->sru.redact.datablksz = redact_block_get_size(rb);
} else {
data->type = PREVIOUSLY_REDACTED;
}
bqueue_enqueue(rlcap->q, data, sizeof (*data));
return (0);
}
/*
* This function kicks off the traverse_dataset. It also handles setting the
* error code of the thread in case something goes wrong, and pushes the End of
* Stream record when the traverse_dataset call has finished.
*/
static __attribute__((noreturn)) void
send_traverse_thread(void *arg)
{
struct send_thread_arg *st_arg = arg;
int err = 0;
struct send_range *data;
fstrans_cookie_t cookie = spl_fstrans_mark();
err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
st_arg->fromtxg, &st_arg->resume,
st_arg->flags, send_cb, st_arg);
if (err != EINTR)
st_arg->error_code = err;
data = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Utility function that causes End of Stream records to compare after all
* others, so that other threads' comparison logic can stay simple.
*/
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
if (from->eos_marker == B_TRUE)
return (1);
if (to->eos_marker == B_TRUE)
return (-1);
uint64_t from_obj = from->object;
uint64_t from_end_obj = from->object + 1;
uint64_t to_obj = to->object;
uint64_t to_end_obj = to->object + 1;
if (from_obj == 0) {
ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (to_obj == 0) {
ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (from_end_obj <= to_obj)
return (-1);
if (from_obj >= to_end_obj)
return (1);
int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
if (unlikely(cmp))
return (cmp);
if (from->end_blkid <= to->start_blkid)
return (-1);
if (from->start_blkid >= to->end_blkid)
return (1);
return (0);
}
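/*
* Example of the object-space translation above, assuming
* DNODES_PER_BLOCK_SHIFT == 5 (512-byte dnodes): a meta-dnode HOLE range
* with start_blkid == 2 and end_blkid == 4 covers objects [64, 128), and
* therefore compares after an OBJECT record for object 10 and before one
* for object 200.
*/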
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, but do not free the old data. This is used so that the
* records can be sent on to the main thread without copying the data.
*/
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = bqueue_dequeue(bq);
ASSERT3S(send_range_after(prev, next), ==, -1);
return (next);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, and free the old data.
*/
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = get_next_range_nofree(bq, prev);
range_free(prev);
return (next);
}
static __attribute__((noreturn)) void
redact_list_thread(void *arg)
{
struct redact_list_thread_arg *rlt_arg = arg;
struct send_range *record;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (rlt_arg->rl != NULL) {
struct redact_list_cb_arg rlcba = {0};
rlcba.cancel = &rlt_arg->cancel;
rlcba.q = &rlt_arg->q;
rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
rlcba.mark_redact = rlt_arg->mark_redact;
int err = dsl_redaction_list_traverse(rlt_arg->rl,
&rlt_arg->resume, redact_list_cb, &rlcba);
if (err != EINTR)
rlt_arg->error_code = err;
}
record = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Compare the start points of the two provided ranges. End of stream ranges
* compare last; objects compare before any data or hole inside that object,
* and before multi-object holes that start at the same object.
*/
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
uint64_t r1_objequiv = r1->object;
uint64_t r1_l0equiv = r1->start_blkid;
uint64_t r2_objequiv = r2->object;
uint64_t r2_l0equiv = r2->start_blkid;
int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
if (unlikely(cmp))
return (cmp);
if (r1->object == 0) {
r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
r1_l0equiv = 0;
}
if (r2->object == 0) {
r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
r2_l0equiv = 0;
}
cmp = TREE_CMP(r1_objequiv, r2_objequiv);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
if (unlikely(cmp))
return (cmp);
return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}
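/*
* When two ranges start at the same point, the tie-breaks above order
* OBJECT_RANGE records first, then OBJECT records, then data and hole
* ranges, so an object's metadata is always merged ahead of its contents.
*/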
enum q_idx {
REDACT_IDX = 0,
TO_IDX,
FROM_IDX,
NUM_THREADS
};
/*
* This function returns the next range the send_merge_thread should operate on.
* The inputs are two arrays; the first stores the range currently at the
* front of each queue stored in the second. The ranges are sorted in descending
* priority order; the metadata from earlier ranges overrules metadata from
* later ranges. out_mask is used to return which threads the ranges came from;
* bit i is set if ranges[i] started at the same place as the returned range.
*
* This code is not hardcoded to compare a specific number of threads; it could
* be used with any number, just by changing the q_idx enum.
*
* The "next range" is the one with the earliest start; if two starts are equal,
* the highest-priority range is the next to operate on. If a higher-priority
* range starts in the middle of the first range, then the first range will be
* truncated to end where the higher-priority range starts, and we will operate
* on that one next time. In this way, we make sure that each block covered by
* some range gets covered by a returned range, and each block covered is
* returned using the metadata of the highest-priority range it appears in.
*
* For example, if the three ranges at the front of the queues were [2,4),
* [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
* from the third range, [2,4) with the metadata from the first range, and then
* [4,5) with the metadata from the second.
*/
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
int idx = 0; // index of the range with the earliest start
int i;
uint64_t bmask = 0;
for (i = 1; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
idx = i;
}
if (ranges[idx]->eos_marker) {
struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
*out_mask = 0;
return (ret);
}
/*
* Find all the ranges that start at that same point.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
bmask |= 1 << i;
}
*out_mask = bmask;
/*
* OBJECT_RANGE records only come from the TO thread, and should always
* be treated as overlapping with nothing and sent on immediately. They
* are only used in raw sends, and are never redacted.
*/
if (ranges[idx]->type == OBJECT_RANGE) {
ASSERT3U(idx, ==, TO_IDX);
ASSERT3U(*out_mask, ==, 1 << TO_IDX);
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Find the first start or end point after the start of the first range.
*/
uint64_t first_change = ranges[idx]->end_blkid;
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || ranges[i]->eos_marker ||
ranges[i]->object > ranges[idx]->object ||
ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
if (first_change > ranges[i]->start_blkid &&
(bmask & (1 << i)) == 0)
first_change = ranges[i]->start_blkid;
else if (first_change > ranges[i]->end_blkid)
first_change = ranges[i]->end_blkid;
}
/*
* Update all ranges to no longer overlap with the range we're
* returning. All such ranges must start at the same place as the range
* being returned, and end at or after first_change. Thus we update
* their start to first_change. If that makes them size 0, then free
* them and pull a new range from that thread.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || (bmask & (1 << i)) == 0)
continue;
ASSERT3U(first_change, >, ranges[i]->start_blkid);
ranges[i]->start_blkid = first_change;
ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
if (ranges[i]->start_blkid == ranges[i]->end_blkid)
ranges[i] = get_next_range(qs[i], ranges[i]);
}
/*
* Short-circuit the simple case; if the range doesn't overlap with
* anything else, or it only overlaps with things that start at the same
* place and are longer, send it on.
*/
if (first_change == ranges[idx]->end_blkid) {
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Otherwise, return a truncated copy of ranges[idx] and move the start
* of ranges[idx] back to first_change.
*/
struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
*ret = *ranges[idx];
ret->end_blkid = first_change;
ranges[idx]->start_blkid = first_change;
return (ret);
}
#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
/*
* Merge the results from the from thread and the to thread, and then hand the
* records off to send_prefetch_thread to prefetch them. If this is not a
* send from a redaction bookmark, the from thread will push an end of stream
* record and stop, and we'll just send everything that was changed in the
* to_ds since the ancestor's creation txg. If it is, then since
* traverse_dataset has a canonical order, we can compare each change as
* they're pulled off the queues. That will give us a stream that is
* appropriately sorted, and covers all records. In addition, we pull the
* data from the redact_list_thread and use that to determine which blocks
* should be redacted.
*/
static __attribute__((noreturn)) void
send_merge_thread(void *arg)
{
struct send_merge_thread_arg *smt_arg = arg;
struct send_range *front_ranges[NUM_THREADS];
bqueue_t *queues[NUM_THREADS];
int err = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (smt_arg->redact_arg == NULL) {
front_ranges[REDACT_IDX] =
kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
front_ranges[REDACT_IDX]->type = REDACT;
queues[REDACT_IDX] = NULL;
} else {
front_ranges[REDACT_IDX] =
bqueue_dequeue(&smt_arg->redact_arg->q);
queues[REDACT_IDX] = &smt_arg->redact_arg->q;
}
front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
queues[TO_IDX] = &smt_arg->to_arg->q;
front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
queues[FROM_IDX] = &smt_arg->from_arg->q;
uint64_t mask = 0;
struct send_range *range;
for (range = find_next_range(front_ranges, queues, &mask);
!range->eos_marker && err == 0 && !smt_arg->cancel;
range = find_next_range(front_ranges, queues, &mask)) {
/*
* If the range in question was in both the from redact bookmark
* and the bookmark we're using to redact, then don't send it.
* It's already redacted on the receiving system, so a redaction
* record would be redundant.
*/
if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
ASSERT3U(range->type, ==, REDACT);
range_free(range);
continue;
}
bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
if (smt_arg->to_arg->error_code != 0) {
err = smt_arg->to_arg->error_code;
} else if (smt_arg->from_arg->error_code != 0) {
err = smt_arg->from_arg->error_code;
} else if (smt_arg->redact_arg != NULL &&
smt_arg->redact_arg->error_code != 0) {
err = smt_arg->redact_arg->error_code;
}
}
if (smt_arg->cancel && err == 0)
err = SET_ERROR(EINTR);
smt_arg->error = err;
if (smt_arg->error != 0) {
smt_arg->to_arg->cancel = B_TRUE;
smt_arg->from_arg->cancel = B_TRUE;
if (smt_arg->redact_arg != NULL)
smt_arg->redact_arg->cancel = B_TRUE;
}
for (int i = 0; i < NUM_THREADS; i++) {
while (!front_ranges[i]->eos_marker) {
front_ranges[i] = get_next_range(queues[i],
front_ranges[i]);
}
range_free(front_ranges[i]);
}
range->eos_marker = B_TRUE;
bqueue_enqueue_flush(&smt_arg->q, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
struct send_reader_thread_arg {
struct send_merge_thread_arg *smta;
bqueue_t q;
boolean_t cancel;
boolean_t issue_reads;
uint64_t featureflags;
int error;
};
static void
dmu_send_read_done(zio_t *zio)
{
struct send_range *range = zio->io_private;
mutex_enter(&range->sru.data.lock);
if (zio->io_error != 0) {
abd_free(range->sru.data.abd);
range->sru.data.abd = NULL;
range->sru.data.io_err = zio->io_error;
}
ASSERT(range->sru.data.io_outstanding);
range->sru.data.io_outstanding = B_FALSE;
cv_broadcast(&range->sru.data.cv);
mutex_exit(&range->sru.data.lock);
}
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
objset_t *os = srta->smta->os;
ASSERT3U(range->type, ==, DATA);
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
/*
* If we have large blocks stored on disk but
* the send flags don't allow us to send large
* blocks, we split the data from the arc buf
* into chunks.
*/
boolean_t split_large_blocks =
srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
* the following are true:
* - stream compression was requested
* - we aren't splitting large blocks into smaller chunks
* - the data won't need to be byteswapped before sending
* - this isn't an embedded block
* - this isn't metadata (if receiving on a different endian
* system it can be byteswapped more easily)
*/
boolean_t request_compressed =
(srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
!split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
!BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
zioflags |= ZIO_FLAG_RAW;
srdp->io_compressed = B_TRUE;
} else if (request_compressed) {
zioflags |= ZIO_FLAG_RAW_COMPRESS;
srdp->io_compressed = B_TRUE;
}
srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
if (!srta->issue_reads)
return;
if (BP_IS_REDACTED(bp))
return;
if (send_do_embed(bp, srta->featureflags))
return;
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(os),
.zb_object = range->object,
.zb_level = 0,
.zb_blkid = range->start_blkid,
};
arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
int arc_err = arc_read(NULL, os->os_spa, bp,
arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb);
/*
* If the data is not already cached in the ARC, we read directly
* from zio. This avoids the performance overhead of adding a new
* entry to the ARC, and we also avoid polluting the ARC cache with
* data that is not likely to be used in the future.
*/
if (arc_err != 0) {
srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
srdp->io_outstanding = B_TRUE;
zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
srdp->datasz, dmu_send_read_done, range,
ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
}
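/*
* As a concrete case of the logic above: a 128K lz4-compressed level-0 data
* block in a compressed, non-raw send satisfies request_compressed, so the
* read is issued with ZIO_FLAG_RAW_COMPRESS and datasz is the physical
* size; the same block in a stream without DMU_BACKUP_FEATURE_COMPRESSED is
* read decompressed and datasz is the 128K logical size.
*/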
/*
* Create a new record with the given values.
*/
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
(BP_IS_REDACTED(bp) ? REDACT : DATA));
struct send_range *range = range_alloc(range_type, dn->dn_object,
blkid, blkid + count, B_FALSE);
if (blkid == DMU_SPILL_BLKID) {
ASSERT3P(bp, !=, NULL);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
}
switch (range_type) {
case HOLE:
range->sru.hole.datablksz = datablksz;
break;
case DATA:
ASSERT3U(count, ==, 1);
range->sru.data.datablksz = datablksz;
range->sru.data.obj_type = dn->dn_type;
range->sru.data.bp = *bp;
issue_data_read(srta, range);
break;
case REDACT:
range->sru.redact.datablksz = datablksz;
break;
default:
break;
}
bqueue_enqueue(q, range, datablksz);
}
/*
* This thread is responsible for two things: First, it retrieves the correct
* blkptr in the to ds if we need to send the data because of something from
* the from thread. As a result of this, we're the first ones to discover that
* some indirect blocks can be discarded because they're not holes. Second,
* it issues prefetches for the data we need to send.
*/
static __attribute__((noreturn)) void
send_reader_thread(void *arg)
{
struct send_reader_thread_arg *srta = arg;
struct send_merge_thread_arg *smta = srta->smta;
bqueue_t *inq = &smta->q;
bqueue_t *outq = &srta->q;
objset_t *os = smta->os;
fstrans_cookie_t cookie = spl_fstrans_mark();
struct send_range *range = bqueue_dequeue(inq);
int err = 0;
/*
* If the record we're analyzing is from a redaction bookmark from the
* fromds, then we need to know whether or not it exists in the tods so
* we know whether to create records for it or not. If it does, we need
* the datablksz so we can generate an appropriate record for it.
* Finally, if it isn't redacted, we need the blkptr so that we can send
* a WRITE record containing the actual data.
*/
uint64_t last_obj = UINT64_MAX;
uint64_t last_obj_exists = B_TRUE;
while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
err == 0) {
switch (range->type) {
case DATA:
issue_data_read(srta, range);
bqueue_enqueue(outq, range, range->sru.data.datablksz);
range = get_next_range_nofree(inq, range);
break;
case HOLE:
case OBJECT:
case OBJECT_RANGE:
case REDACT: // Redacted blocks must exist
bqueue_enqueue(outq, range, sizeof (*range));
range = get_next_range_nofree(inq, range);
break;
case PREVIOUSLY_REDACTED: {
/*
* This entry came from the "from bookmark" when
* sending from a bookmark that has a redaction
* list. We need to check if this object/blkid
* exists in the target ("to") dataset, and if
* not then we drop this entry. We also need
* to fill in the block pointer so that we know
* what to prefetch.
*
* To accomplish the above, we first cache whether or
* not the last object we examined exists. If it
* doesn't, we can drop this record. If it does, we hold
* the dnode and use it to call dbuf_dnode_findbp. We do
* this instead of dbuf_bookmark_findbp because we will
* often operate on large ranges, and holding the dnode
* once is more efficient.
*/
boolean_t object_exists = B_TRUE;
/*
* If the data is redacted, we only care if it exists,
* so that we don't send records for objects that have
* been deleted.
*/
dnode_t *dn;
if (range->object == last_obj && !last_obj_exists) {
/*
* If we're still examining the same object as
* previously, and it doesn't exist, we don't
* need to call dbuf_bookmark_findbp.
*/
object_exists = B_FALSE;
} else {
err = dnode_hold(os, range->object, FTAG, &dn);
if (err == ENOENT) {
object_exists = B_FALSE;
err = 0;
}
last_obj = range->object;
last_obj_exists = object_exists;
}
if (err != 0) {
break;
} else if (!object_exists) {
/*
* The block was modified, but doesn't
* exist in the to dataset; if it was
* deleted in the to dataset, then we'll
* visit the hole bp for it at some point.
*/
range = get_next_range(inq, range);
continue;
}
uint64_t file_max =
MIN(dn->dn_maxblkid, range->end_blkid);
/*
* The object exists, so we need to try to find the
* blkptr for each block in the range we're processing.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
for (uint64_t blkid = range->start_blkid;
blkid < file_max; blkid++) {
blkptr_t bp;
uint32_t datablksz =
dn->dn_phys->dn_datablkszsec <<
SPA_MINBLOCKSHIFT;
uint64_t offset = blkid * datablksz;
/*
* This call finds the next non-hole block in
* the object. This is to prevent a
* performance problem where we're unredacting
* a large hole. Using dnode_next_offset to
* skip over the large hole avoids iterating
* over every block in it.
*/
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&offset, 1, 1, 0);
if (err == ESRCH) {
offset = UINT64_MAX;
err = 0;
} else if (err != 0) {
break;
}
if (offset != blkid * datablksz) {
/*
* if there is a hole from here
* (blkid) to offset
*/
offset = MIN(offset, file_max *
datablksz);
uint64_t nblks = (offset / datablksz) -
blkid;
enqueue_range(srta, outq, dn, blkid,
nblks, NULL, datablksz);
blkid += nblks;
}
if (blkid >= file_max)
break;
err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
NULL, NULL);
if (err != 0)
break;
ASSERT(!BP_IS_HOLE(&bp));
enqueue_range(srta, outq, dn, blkid, 1, &bp,
datablksz);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
range = get_next_range(inq, range);
}
}
}
if (srta->cancel || err != 0) {
smta->cancel = B_TRUE;
srta->error = err;
} else if (smta->error != 0) {
srta->error = smta->error;
}
while (!range->eos_marker)
range = get_next_range(inq, range);
bqueue_enqueue_flush(outq, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
struct dmu_send_params {
/* Pool args */
const void *tag; // Tag dp was held with, will be used to release dp.
dsl_pool_t *dp;
/* To snapshot args */
const char *tosnap;
dsl_dataset_t *to_ds;
/* From snapshot args */
zfs_bookmark_phys_t ancestor_zb;
uint64_t *fromredactsnaps;
/* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
uint64_t numfromredactsnaps;
/* Stream params */
boolean_t is_clone;
boolean_t embedok;
boolean_t large_block_ok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj;
uint64_t resumeoff;
uint64_t saved_guid;
zfs_bookmark_phys_t *redactbook;
/* Stream output params */
dmu_send_outparams_t *dso;
/* Stream progress params */
offset_t *off;
int outfd;
char saved_toname[MAXNAMELEN];
};
static int
setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
uint64_t *featureflags)
{
dsl_dataset_t *to_ds = dspp->to_ds;
dsl_pool_t *dp = dspp->dp;
if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
return (SET_ERROR(EINVAL));
if (version >= ZPL_VERSION_SA)
*featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
/* raw sends imply large_block_ok */
if ((dspp->rawok || dspp->large_block_ok) &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
}
/* encrypted datasets will not have embedded blocks */
if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
*featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
/* raw send implies compressok */
if (dspp->compressok || dspp->rawok)
*featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
if (dspp->rawok && os->os_encrypted)
*featureflags |= DMU_BACKUP_FEATURE_RAW;
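/*
 * Embedded, compressed, and raw blocks may all carry LZ4-compressed
 * data, so advertise LZ4 whenever the pool has the feature active.
 */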
if ((*featureflags &
(DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
DMU_BACKUP_FEATURE_RAW)) != 0 &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
* We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
* allow sending ZSTD compressed datasets to a receiver that does not
* support ZSTD.
*/
if ((*featureflags &
(DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_ZSTD;
}
if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
*featureflags |= DMU_BACKUP_FEATURE_RESUMING;
}
if (dspp->redactbook != NULL) {
*featureflags |= DMU_BACKUP_FEATURE_REDACTED;
}
if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
return (0);
}
static dmu_replay_record_t *
create_begin_record(struct dmu_send_params *dspp, objset_t *os,
uint64_t featureflags)
{
dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
KM_SLEEP);
drr->drr_type = DRR_BEGIN;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
dsl_dataset_t *to_ds = dspp->to_ds;
drrb->drr_magic = DMU_BACKUP_MAGIC;
drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
drrb->drr_type = dmu_objset_type(os);
drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
if (dspp->is_clone)
drrb->drr_flags |= DRR_FLAG_CLONE;
if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drrb->drr_flags |= DRR_FLAG_CI_DATA;
if (zfs_send_set_freerecords_bit)
drrb->drr_flags |= DRR_FLAG_FREERECORDS;
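/*
 * Modern senders always set DRR_FLAG_SPILL_BLOCK to indicate that
 * spill blocks are properly accounted for in the stream.
 */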
drrb->drr_flags |= DRR_FLAG_SPILL_BLOCK;
if (dspp->savedok) {
drrb->drr_toguid = dspp->saved_guid;
strlcpy(drrb->drr_toname, dspp->saved_toname,
sizeof (drrb->drr_toname));
} else {
dsl_dataset_name(to_ds, drrb->drr_toname);
if (!to_ds->ds_is_snapshot) {
(void) strlcat(drrb->drr_toname, "@--head--",
sizeof (drrb->drr_toname));
}
}
return (drr);
}
static void
setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
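/*
 * The queues are limited by bytes enqueued rather than by entry
 * count; make sure there is room for at least two maximum-size
 * records.
 */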
VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
to_arg->error_code = 0;
to_arg->cancel = B_FALSE;
to_arg->os = to_os;
to_arg->fromtxg = fromtxg;
to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
if (rawok)
to_arg->flags |= TRAVERSE_NO_DECRYPT;
if (zfs_send_corrupt_data)
to_arg->flags |= TRAVERSE_HARD;
to_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_from_thread(struct redact_list_thread_arg *from_arg,
redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
from_arg->error_code = 0;
from_arg->cancel = B_FALSE;
from_arg->rl = from_rl;
from_arg->mark_redact = B_FALSE;
from_arg->num_blocks_visited = &dssp->dss_blocks;
/*
* If from_ds is null, send_traverse_thread just returns success and
* enqueues an eos marker.
*/
(void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
if (dspp->redactbook == NULL)
return;
rlt_arg->cancel = B_FALSE;
VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
rlt_arg->error_code = 0;
rlt_arg->mark_redact = B_TRUE;
rlt_arg->rl = rl;
rlt_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_merge_thread(struct send_merge_thread_arg *smt_arg,
struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
objset_t *os)
{
VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
smt_arg->cancel = B_FALSE;
smt_arg->error = 0;
smt_arg->from_arg = from_arg;
smt_arg->to_arg = to_arg;
if (dspp->redactbook != NULL)
smt_arg->redact_arg = rlt_arg;
smt_arg->os = os;
(void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
TS_RUN, minclsyspri);
}
static void
setup_reader_thread(struct send_reader_thread_arg *srt_arg,
struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
uint64_t featureflags)
{
VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
srt_arg->smta = smt_arg;
srt_arg->issue_reads = !dspp->dso->dso_dryrun;
srt_arg->featureflags = featureflags;
(void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static int
setup_resume_points(struct dmu_send_params *dspp,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
struct redact_list_thread_arg *rlt_arg,
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
(void) smt_arg;
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
uint64_t obj = 0;
uint64_t blkid = 0;
if (resuming) {
obj = dspp->resumeobj;
dmu_object_info_t to_doi;
err = dmu_object_info(os, obj, &to_doi);
if (err != 0)
return (err);
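/*
 * Convert the byte offset where the previous send left off into
 * a block id within the resume object.
 */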
blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
/*
* If we're resuming a redacted send, we can skip to the appropriate
* point in the redaction bookmark by binary searching through it.
*/
if (redact_rl != NULL) {
SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
* Note: If the resume point is in an object whose
* blocksize is different in the from vs to snapshots,
* we will have divided by the "wrong" blocksize.
* However, in this case fromsnap's send_cb() will
* detect that the blocksize has changed and therefore
* ignore this object.
*
* If we're resuming a send from a redaction bookmark,
* we still cannot accidentally suggest blocks behind
* the to_ds. In addition, we know that any blocks in
* the object in the to_ds will have to be sent, since
* the size changed. Therefore, we can't cause any harm
* this way either.
*/
SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
if (resuming) {
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
return (0);
}
static dmu_sendstatus_t *
setup_send_progress(struct dmu_send_params *dspp)
{
dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
dssp->dss_outfd = dspp->outfd;
dssp->dss_off = dspp->off;
dssp->dss_proc = curproc;
mutex_enter(&dspp->to_ds->ds_sendstream_lock);
list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
mutex_exit(&dspp->to_ds->ds_sendstream_lock);
return (dssp);
}
/*
* Actually do the bulk of the work in a zfs send.
*
* The idea is that we want to do a send from ancestor_zb to to_ds. We also
* want to not send any data that has been modified by all the datasets in
* redactsnaparr, and store the list of blocks that are redacted in this way in
* a bookmark named redactbook, created on the to_ds. We do this by creating
* several worker threads, whose function is described below.
*
* There are three cases.
* The first case is a redacted zfs send. In this case there are 5 threads.
* The first thread is the to_ds traversal thread: it calls traverse_dataset on
* the to_ds and finds all the blocks that have changed since ancestor_zb (if
* it's a full send, that's all blocks in the dataset). It then sends those
* blocks on to the send merge thread. The redact list thread takes the data
* from the redaction bookmark and sends those blocks on to the send merge
* thread. The send merge thread takes the data from the to_ds traversal
* thread, and combines it with the redaction records from the redact list
* thread. If a block appears in both the to_ds's data and the redaction data,
* the send merge thread will mark it as redacted and send it on to the prefetch
* thread. Otherwise, the send merge thread will send the block on to the
* prefetch thread unchanged. The prefetch thread will issue prefetch reads for
* any data that isn't redacted, and then send the data on to the main thread.
* The main thread behaves the same as in a normal send case, issuing demand
* reads for data blocks and sending out records over the network.
*
* The graphic below diagrams the flow of data in the case of a redacted zfs
* send. Each box represents a thread, and each line represents the flow of
* data.
*
* Records from the |
* redaction bookmark |
* +--------------------+ | +---------------------------+
* | | v | Send Merge Thread |
* | Redact List Thread +----------> Apply redaction marks to |
* | | | records as specified by |
* +--------------------+ | redaction ranges |
* +----^---------------+------+
* | | Merged data
* | |
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since |
* ancestor_zb +------------v----+
* | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The second case is an incremental send from a redaction bookmark. The to_ds
* traversal thread and the main thread behave the same as in the redacted
* send case. The new thread is the from bookmark traversal thread. It
* iterates over the redaction list in the redaction bookmark, and enqueues
* records for each block that was redacted in the original send. The send
* merge thread now has to merge the data from the two threads. For details
* about that process, see the header comment of send_merge_thread(). Any data
* it decides to send on will be prefetched by the prefetch thread. Note that
* you can perform a redacted send from a redaction bookmark; in that case,
* the data flow behaves very similarly to the flow in the redacted send case,
* except with the addition of the bookmark traversal thread iterating over the
* redaction bookmark. The send_merge_thread also has to take on the
* responsibility of merging the redact list thread's records, the bookmark
* traversal thread's records, and the to_ds records.
*
* +---------------------+
* | |
* | Redact List Thread +--------------+
* | | |
* +---------------------+ |
* Blocks in redaction list | Ranges modified by every secure snap
* of from bookmark | (or EOS if not redacted)
* |
* +---------------------+ | +----v----------------------+
* | bookmark Traversal | v | Send Merge Thread |
* | Thread (finds +---------> Merges bookmark, rlt, and |
* | candidate blocks) | | to_ds send records |
* +---------------------+ +----^---------------+------+
* | | Merged data
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since +------------v----+
* ancestor_zb | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The final case is a simple zfs full or incremental send. The to_ds traversal
* thread behaves the same as always. The redact list thread is never started.
* The send merge thread takes all the blocks that the to_ds traversal thread
* sends it, prefetches the data, and sends the blocks on to the main thread.
* The main thread sends the data over the wire.
*
* To keep performance acceptable, we want to prefetch the data in the worker
* threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
* feature built into traverse_dataset, the combining and deletion of records
* due to redaction and sends from redaction bookmarks mean that we could
* issue many unnecessary prefetches. As a result, we only prefetch data
* after we've determined that the record is not going to be redacted. To
* prevent the prefetching from getting too far ahead of the main thread, the
* blocking queues that are used for communication are capped not by the
* number of entries in the queue, but by the sum of the size of the
* prefetches associated with them. The limit on the amount of data that the
* thread can prefetch beyond what the main thread has reached is controlled
* by the global variable zfs_send_queue_length. In addition, to prevent poor
* performance at the beginning of a send, we also limit how far ahead the
* traversal threads can get. That distance is controlled by the
* zfs_send_no_prefetch_queue_length tunable.
*
* Note: Releases dp using the specified tag.
*/
static int
dmu_send_impl(struct dmu_send_params *dspp)
{
objset_t *os;
dmu_replay_record_t *drr;
dmu_sendstatus_t *dssp;
dmu_send_cookie_t dsc = {0};
int err;
uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
uint64_t featureflags = 0;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *rlt_arg;
struct send_merge_thread_arg *smt_arg;
struct send_reader_thread_arg *srt_arg;
struct send_range *range;
redaction_list_t *from_rl = NULL;
redaction_list_t *redact_rl = NULL;
boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
boolean_t book_resuming = resuming;
dsl_dataset_t *to_ds = dspp->to_ds;
zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
dsl_pool_t *dp = dspp->dp;
const void *tag = dspp->tag;
err = dmu_objset_from_ds(to_ds, &os);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If this is a non-raw send of an encrypted ds, we can ensure that
* the objset_phys_t is authenticated. This is safe because this is
* either a snapshot or we have owned the dataset, ensuring that
* it can't be modified.
*/
if (!dspp->rawok && os->os_encrypted &&
arc_is_unauthenticated(os->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform(os->os_phys_buf, os->os_spa,
&zb, B_FALSE);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If we're doing a redacted send, hold the bookmark's redaction list.
*/
if (dspp->redactbook != NULL) {
err = dsl_redaction_list_hold_obj(dp,
dspp->redactbook->zbm_redaction_obj, FTAG,
&redact_rl);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
}
/*
* If we're sending from a redaction bookmark, hold the redaction list
* so that we can consider sending the redacted blocks.
*/
if (ancestor_zb->zbm_redaction_obj != 0) {
err = dsl_redaction_list_hold_obj(dp,
ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
if (err != 0) {
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, from_rl, FTAG);
}
dsl_dataset_long_hold(to_ds, FTAG);
from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
drr = create_begin_record(dspp, os, featureflags);
dssp = setup_send_progress(dspp);
dsc.dsc_drr = drr;
dsc.dsc_dso = dspp->dso;
dsc.dsc_os = os;
dsc.dsc_off = dspp->off;
dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
dsc.dsc_fromtxg = fromtxg;
dsc.dsc_pending_op = PENDING_NONE;
dsc.dsc_featureflags = featureflags;
dsc.dsc_resume_object = dspp->resumeobj;
dsc.dsc_resume_offset = dspp->resumeoff;
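/*
 * We are done with the pool configuration; release the hold on dp
 * here, as promised in the note above dmu_send_impl().
 */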
dsl_pool_rele(dp, tag);
void *payload = NULL;
size_t payload_len = 0;
nvlist_t *nvl = fnvlist_alloc();
/*
* If we're doing a redacted send, we include the snapshots we're
* redacted with respect to so that the target system knows what send
* streams can be correctly received on top of this dataset. If we're
* instead sending a redacted dataset, we include the snapshots that the
* dataset was created with respect to.
*/
if (dspp->redactbook != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
redact_rl->rl_phys->rlp_snaps,
redact_rl->rl_phys->rlp_num_snaps);
} else if (dsl_dataset_feature_is_active(to_ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t *tods_guids;
uint64_t length;
VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
length);
}
/*
* If we're sending from a redaction bookmark, then we should retrieve
* the guids of that bookmark so we can send them over the wire.
*/
if (from_rl != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
from_rl->rl_phys->rlp_snaps,
from_rl->rl_phys->rlp_num_snaps);
}
/*
* If the snapshot we're sending from is redacted, include the redaction
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
ASSERT3P(from_rl, ==, NULL);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
kmem_free(dspp->fromredactsnaps,
dspp->numfromredactsnaps * sizeof (uint64_t));
dspp->fromredactsnaps = NULL;
}
}
if (resuming || book_resuming) {
err = setup_resume_points(dspp, to_arg, from_arg,
rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
uint64_t ivset_guid = ancestor_zb->zbm_ivset_guid;
nvlist_t *keynvl = NULL;
ASSERT(os->os_encrypted);
err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
&keynvl);
if (err != 0) {
fnvlist_free(nvl);
goto out;
}
fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
fnvlist_free(keynvl);
}
if (!nvlist_empty(nvl)) {
payload = fnvlist_pack(nvl, &payload_len);
drr->drr_payloadlen = payload_len;
}
fnvlist_free(nvl);
err = dump_record(&dsc, payload, payload_len);
fnvlist_pack_free(payload, payload_len);
if (err != 0) {
err = dsc.dsc_err;
goto out;
}
setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
setup_from_thread(from_arg, from_rl, dssp);
setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
range = bqueue_dequeue(&srt_arg->q);
while (err == 0 && !range->eos_marker) {
err = do_dump(&dsc, range);
range = get_next_range(&srt_arg->q, range);
if (issig(JUSTLOOKING) && issig(FORREAL))
err = SET_ERROR(EINTR);
}
/*
* If we hit an error or are interrupted, cancel our worker threads and
* clear the queue of any pending records. The threads will pass the
* cancel up the tree of worker threads, and each one will clean up any
* pending records before exiting.
*/
if (err != 0) {
srt_arg->cancel = B_TRUE;
while (!range->eos_marker) {
range = get_next_range(&srt_arg->q, range);
}
}
range_free(range);
bqueue_destroy(&srt_arg->q);
bqueue_destroy(&smt_arg->q);
if (dspp->redactbook != NULL)
bqueue_destroy(&rlt_arg->q);
bqueue_destroy(&to_arg->q);
bqueue_destroy(&from_arg->q);
if (err == 0 && srt_arg->error != 0)
err = srt_arg->error;
if (err != 0)
goto out;
if (dsc.dsc_pending_op != PENDING_NONE)
if (dump_record(&dsc, NULL, 0) != 0)
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsc.dsc_err != 0)
err = dsc.dsc_err;
goto out;
}
/*
* Send the DRR_END record if this is not a saved stream.
* Otherwise, the omitted DRR_END record will signal to
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
memset(drr, 0, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
if (dump_record(&dsc, NULL, 0) != 0)
err = dsc.dsc_err;
}
out:
mutex_enter(&to_ds->ds_sendstream_lock);
list_remove(&to_ds->ds_sendstreams, dssp);
mutex_exit(&to_ds->ds_sendstream_lock);
VERIFY(err != 0 || (dsc.dsc_sent_begin &&
(dsc.dsc_sent_end || dspp->savedok)));
kmem_free(drr, sizeof (dmu_replay_record_t));
kmem_free(dssp, sizeof (dmu_sendstatus_t));
kmem_free(from_arg, sizeof (*from_arg));
kmem_free(to_arg, sizeof (*to_arg));
kmem_free(rlt_arg, sizeof (*rlt_arg));
kmem_free(smt_arg, sizeof (*smt_arg));
kmem_free(srt_arg, sizeof (*srt_arg));
dsl_dataset_long_rele(to_ds, FTAG);
if (from_rl != NULL) {
dsl_redaction_list_long_rele(from_rl, FTAG);
dsl_redaction_list_rele(from_rl, FTAG);
}
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err;
dsl_dataset_t *fromds;
ds_hold_flags_t dsflags;
struct dmu_send_params dspp = {0};
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.rawok = rawok;
dspp.savedok = savedok;
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
err = dsl_pool_hold(pool, FTAG, &dspp.dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (fromsnap != 0) {
err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
FTAG, &fromds);
if (err != 0) {
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
dspp.ancestor_zb.zbm_creation_txg =
dsl_dataset_phys(fromds)->ds_creation_txg;
dspp.ancestor_zb.zbm_creation_time =
dsl_dataset_phys(fromds)->ds_creation_time;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(dspp.dp->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&dspp.ancestor_zb.zbm_ivset_guid);
}
/* See dmu_send for the reasons behind this. */
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(fromds,
SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact, size);
}
boolean_t is_before =
dsl_dataset_is_before(dspp.to_ds, fromds, 0);
dspp.is_clone = (dspp.to_ds->ds_dir !=
fromds->ds_dir);
dsl_dataset_rele(fromds, FTAG);
if (!is_before) {
dsl_pool_rele(dspp.dp, FTAG);
err = SET_ERROR(EXDEV);
} else {
err = dmu_send_impl(&dspp);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (dspp.fromredactsnaps)
kmem_free(dspp.fromredactsnaps,
dspp.numfromredactsnaps * sizeof (uint64_t));
dsl_dataset_rele(dspp.to_ds, FTAG);
return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
const char *redactbook, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err = 0;
ds_hold_flags_t dsflags;
boolean_t owned = B_FALSE;
dsl_dataset_t *fromds = NULL;
zfs_bookmark_phys_t book = {0};
struct dmu_send_params dspp = {0};
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
dspp.tosnap = tosnap;
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.resumeobj = resumeobj;
dspp.resumeoff = resumeoff;
dspp.rawok = rawok;
dspp.savedok = savedok;
if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
if (err != 0)
return (err);
if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
* that it doesn't change by owning the dataset.
*/
if (savedok) {
/*
* We are looking for the dataset that represents the
* partially received send stream. If this stream was
* received as a new snapshot of an existing dataset,
* this will be saved in a hidden clone named
* "<pool>/<dataset>/%recv". Otherwise, the stream
* will be saved in the live dataset itself. In
* either case we need to use dsl_dataset_own_force()
* because the stream is marked as inconsistent,
* which would normally make it unavailable to be
* owned.
*/
char *name = kmem_asprintf("%s/%s", tosnap,
recv_clone_name);
err = dsl_dataset_own_force(dspp.dp, name, dsflags,
FTAG, &dspp.to_ds);
if (err == ENOENT) {
err = dsl_dataset_own_force(dspp.dp, tosnap,
dsflags, FTAG, &dspp.to_ds);
}
if (err == 0) {
owned = B_TRUE;
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1,
&dspp.saved_guid);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TONAME, 1,
sizeof (dspp.saved_toname),
dspp.saved_toname);
}
/* Only disown if there was an error in the lookups */
if (owned && (err != 0))
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
kmem_strfree(name);
} else {
err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
FTAG, &dspp.to_ds);
if (err == 0)
owned = B_TRUE;
}
} else {
err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
}
if (err != 0) {
/* Note: dsl dataset is not owned at this point */
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (redactbook != NULL) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, tosnap, sizeof (path));
char *at = strchr(path, '@');
if (at == NULL) {
err = SET_ERROR(EINVAL);
} else {
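/*
 * Rewrite "pool/fs@snap" as "pool/fs#bookmark" so we can
 * look up the redaction bookmark on the same filesystem.
 */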
(void) snprintf(at, sizeof (path) - (at - path), "#%s",
redactbook);
err = dsl_bookmark_lookup(dspp.dp, path,
NULL, &book);
dspp.redactbook = &book;
}
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
if (fromsnap != NULL) {
zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
int fsnamelen;
if (strpbrk(tosnap, "@#") != NULL)
fsnamelen = strpbrk(tosnap, "@#") - tosnap;
else
fsnamelen = strlen(tosnap);
/*
* If the fromsnap is in a different filesystem, then
* mark the send stream as a clone.
*/
if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
(fromsnap[fsnamelen] != '@' &&
fromsnap[fsnamelen] != '#')) {
dspp.is_clone = B_TRUE;
}
if (strchr(fromsnap, '@') != NULL) {
err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
&fromds);
if (err != 0) {
ASSERT3P(fromds, ==, NULL);
} else {
/*
* We need to make a deep copy of the redact
* snapshots of the from snapshot, because the
* array will be freed when we evict from_ds.
*/
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(
fromds, SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps =
NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size =
dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
0)) {
err = SET_ERROR(EXDEV);
} else {
zb->zbm_creation_txg =
dsl_dataset_phys(fromds)->
ds_creation_txg;
zb->zbm_creation_time =
dsl_dataset_phys(fromds)->
ds_creation_time;
zb->zbm_guid =
dsl_dataset_phys(fromds)->ds_guid;
zb->zbm_redaction_obj = 0;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(
dspp.dp->dp_meta_objset,
fromds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1,
&zb->zbm_ivset_guid);
}
}
dsl_dataset_rele(fromds, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
zb);
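/*
 * dsl_bookmark_lookup() returns EXDEV when the bookmark does
 * not directly precede to_ds; a redaction bookmark of to_ds's
 * own snapshot is still a valid incremental source, so clear
 * the error in that case.
 */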
if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
zb->zbm_guid ==
dsl_dataset_phys(dspp.to_ds)->ds_guid)
err = 0;
}
if (err == 0) {
/* dmu_send_impl will call dsl_pool_rele for us. */
err = dmu_send_impl(&dspp);
} else {
if (dspp.fromredactsnaps)
kmem_free(dspp.fromredactsnaps,
dspp.numfromredactsnaps *
sizeof (uint64_t));
dsl_pool_rele(dspp.dp, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
int err = 0;
uint64_t size;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
*/
uint64_t recordsize;
uint64_t record_count;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
/* Assume all (uncompressed) blocks are recordsize. */
if (zfs_override_estimate_recordsize != 0) {
recordsize = zfs_override_estimate_recordsize;
} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
if (err != 0)
return (err);
record_count = uncompressed / recordsize;
/*
* If we're estimating a send size for a compressed stream, use the
* compressed data size to estimate the stream size. Otherwise, use the
* uncompressed data size.
*/
size = stream_compressed ? compressed : uncompressed;
/*
* Subtract out approximate space used by indirect blocks.
* Assume most space is used by data blocks (non-indirect, non-dnode).
* Assume no ditto blocks or internal fragmentation.
*
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block.
*/
size -= record_count * sizeof (blkptr_t);
/* Add in the space for the record associated with each block. */
size += record_count * sizeof (dmu_replay_record_t);
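/*
 * Example: with 128 KiB records, a 1 GiB dataset yields 8192 records;
 * we subtract 8192 * sizeof (blkptr_t) (1 MiB) of indirect overhead
 * and add 8192 replay record headers.
 */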
*sizep = size;
return (0);
}
int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
boolean_t saved, uint64_t *sizep)
{
int err;
dsl_dataset_t *ds = origds;
uint64_t uncomp, comp;
ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
ASSERT(fromds == NULL || frombook == NULL);
/*
* If this is a saved send we may actually be sending
* from the %recv clone used for resuming.
*/
if (saved) {
objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
uint64_t guid;
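/*
 * The + 6 in the buffer size leaves room for the "/%recv"
 * suffix appended below.
 */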
char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_name(origds, dsname);
(void) strcat(dsname, "/");
(void) strlcat(dsname, recv_clone_name, sizeof (dsname));
err = dsl_dataset_hold(origds->ds_dir->dd_pool,
dsname, FTAG, &ds);
if (err != ENOENT && err != 0) {
return (err);
} else if (err == ENOENT) {
ds = origds;
}
/* check that this dataset has partially received data */
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
}
/* tosnap must be a snapshot or the target of a saved send */
if (!ds->ds_is_snapshot && ds == origds)
return (SET_ERROR(EINVAL));
if (fromds != NULL) {
uint64_t used;
if (!fromds->ds_is_snapshot) {
err = SET_ERROR(EINVAL);
goto out;
}
if (!dsl_dataset_is_before(ds, fromds, 0)) {
err = SET_ERROR(EXDEV);
goto out;
}
err = dsl_dataset_space_written(fromds, ds, &used, &comp,
&uncomp);
if (err != 0)
goto out;
} else if (frombook != NULL) {
uint64_t used;
err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
&comp, &uncomp);
if (err != 0)
goto out;
} else {
uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
}
err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
stream_compressed, sizep);
/*
* Add the size of the BEGIN and END records to the estimate.
*/
*sizep += 2 * sizeof (dmu_replay_record_t);
out:
if (ds != origds)
dsl_dataset_rele(ds, FTAG);
return (err);
}
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
"Allow sending corrupt data");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW,
"Maximum send queue length");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
"Send unmodified spill blocks");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW,
"Maximum send queue length for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW,
"Send queue fill fraction");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW,
"Send queue fill fraction for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW,
"Override block size estimate with fixed size");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_crypt.c b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
index 5e6e4e3d6c39..8e1055d9bcb1 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_crypt.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
@@ -1,2884 +1,2918 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
* Copyright (c) 2018 by Delphix. All rights reserved.
*/
#include <sys/dsl_crypt.h>
#include <sys/dsl_pool.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/spa_impl.h>
#include <sys/dmu_objset.h>
#include <sys/zvol.h>
/*
* This file's primary purpose is for managing master encryption keys in
* memory and on disk. For more info on how these keys are used, see the
* block comment in zio_crypt.c.
*
* All master keys are stored encrypted on disk in the form of the DSL
* Crypto Key ZAP object. The binary key data in this object is always
* randomly generated and is encrypted with the user's wrapping key. This
* layer of indirection allows the user to change their key without
* needing to re-encrypt the entire dataset. The ZAP also holds on to the
* (non-encrypted) encryption algorithm identifier, IV, and MAC needed to
* safely decrypt the master key. For more info on the user's key see the
* block comment in libzfs_crypto.c
*
* In-memory encryption keys are managed through the spa_keystore. The
* keystore consists of 3 AVL trees, which are as follows:
*
* The Wrapping Key Tree:
* The wrapping key (wkey) tree stores the user's keys that are fed into the
* kernel through 'zfs load-key' and related commands. Datasets inherit their
* parent's wkey by default, so these structures are refcounted. The wrapping
* keys remain in memory until they are explicitly unloaded (with
* "zfs unload-key"). Unloading is only possible when no datasets are using
* them (refcount=0).
*
* The DSL Crypto Key Tree:
* The DSL Crypto Keys (DCK) are the in-memory representation of decrypted
* master keys. They are used by the functions in zio_crypt.c to perform
* encryption, decryption, and authentication. Snapshots and clones of a given
* dataset will share a DSL Crypto Key, so they are also refcounted. Once the
* refcount on a key hits zero, it is immediately zeroed out and freed.
*
* The Crypto Key Mapping Tree:
* The zio layer needs to lookup master keys by their dataset object id. Since
* the DSL Crypto Keys can belong to multiple datasets, we maintain a tree of
* dsl_key_mapping_t's which essentially just map the dataset object id to its
* appropriate DSL Crypto Key. The management for creating and destroying these
* mappings hooks into the code for owning and disowning datasets. Usually,
* there will only be one active dataset owner, but there are times
* (particularly during dataset creation and destruction) when this may not be
* true or the dataset may not be initialized enough to own. As a result, this
* object is also refcounted.
*/
/*
* This tunable allows datasets to be raw received even if the stream does
* not include IVset guids or if the guids don't match. This is used as part
* of the resolution for ZPOOL_ERRATA_ZOL_8308_ENCRYPTION.
*/
int zfs_disable_ivset_guid_check = 0;
static void
dsl_wrapping_key_hold(dsl_wrapping_key_t *wkey, const void *tag)
{
(void) zfs_refcount_add(&wkey->wk_refcnt, tag);
}
static void
dsl_wrapping_key_rele(dsl_wrapping_key_t *wkey, const void *tag)
{
(void) zfs_refcount_remove(&wkey->wk_refcnt, tag);
}
static void
dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
{
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));
if (wkey->wk_key.ck_data) {
memset(wkey->wk_key.ck_data, 0,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
kmem_free(wkey->wk_key.ck_data,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
}
zfs_refcount_destroy(&wkey->wk_refcnt);
kmem_free(wkey, sizeof (dsl_wrapping_key_t));
}
static void
dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
uint64_t salt, uint64_t iters, dsl_wrapping_key_t **wkey_out)
{
dsl_wrapping_key_t *wkey;
/* allocate the wrapping key */
wkey = kmem_alloc(sizeof (dsl_wrapping_key_t), KM_SLEEP);
/* allocate and initialize the underlying crypto key */
wkey->wk_key.ck_data = kmem_alloc(WRAPPING_KEY_LEN, KM_SLEEP);
wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN);
memcpy(wkey->wk_key.ck_data, wkeydata, WRAPPING_KEY_LEN);
/* initialize the rest of the struct */
zfs_refcount_create(&wkey->wk_refcnt);
wkey->wk_keyformat = keyformat;
wkey->wk_salt = salt;
wkey->wk_iters = iters;
*wkey_out = wkey;
}
int
dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props,
nvlist_t *crypto_args, dsl_crypto_params_t **dcp_out)
{
int ret;
uint64_t crypt = ZIO_CRYPT_INHERIT;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
uint64_t salt = 0, iters = 0;
dsl_crypto_params_t *dcp = NULL;
dsl_wrapping_key_t *wkey = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeydata_len = 0;
const char *keylocation = NULL;
dcp = kmem_zalloc(sizeof (dsl_crypto_params_t), KM_SLEEP);
dcp->cp_cmd = cmd;
/* get relevant arguments from the nvlists */
if (props != NULL) {
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &crypt);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), &salt);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
dcp->cp_crypt = crypt;
}
if (crypto_args != NULL) {
(void) nvlist_lookup_uint8_array(crypto_args, "wkeydata",
&wkeydata, &wkeydata_len);
}
/* check for valid command */
if (dcp->cp_cmd >= DCP_CMD_MAX) {
ret = SET_ERROR(EINVAL);
goto error;
} else {
dcp->cp_cmd = cmd;
}
/* check for valid crypt */
if (dcp->cp_crypt >= ZIO_CRYPT_FUNCTIONS) {
ret = SET_ERROR(EINVAL);
goto error;
} else {
dcp->cp_crypt = crypt;
}
/* check for valid keyformat */
if (keyformat >= ZFS_KEYFORMAT_FORMATS) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check for a valid keylocation (of any kind) and copy it in */
if (keylocation != NULL) {
if (!zfs_prop_valid_keylocation(keylocation, B_FALSE)) {
ret = SET_ERROR(EINVAL);
goto error;
}
dcp->cp_keylocation = spa_strdup(keylocation);
}
/* check wrapping key length, if given */
if (wkeydata != NULL && wkeydata_len != WRAPPING_KEY_LEN) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* if the user asked for the default crypt, determine that now */
if (dcp->cp_crypt == ZIO_CRYPT_ON)
dcp->cp_crypt = ZIO_CRYPT_ON_VALUE;
/* create the wrapping key from the raw data */
if (wkeydata != NULL) {
/* create the wrapping key with the verified parameters */
dsl_wrapping_key_create(wkeydata, keyformat, salt,
iters, &wkey);
dcp->cp_wkey = wkey;
}
/*
* Remove the encryption properties from the nvlist since they are not
* maintained through the DSL.
*/
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_ENCRYPTION));
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_KEYFORMAT));
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT));
(void) nvlist_remove_all(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS));
*dcp_out = dcp;
return (0);
error:
kmem_free(dcp, sizeof (dsl_crypto_params_t));
*dcp_out = NULL;
return (ret);
}
void
dsl_crypto_params_free(dsl_crypto_params_t *dcp, boolean_t unload)
{
if (dcp == NULL)
return;
if (dcp->cp_keylocation != NULL)
spa_strfree(dcp->cp_keylocation);
if (unload && dcp->cp_wkey != NULL)
dsl_wrapping_key_free(dcp->cp_wkey);
kmem_free(dcp, sizeof (dsl_crypto_params_t));
}
static int
spa_crypto_key_compare(const void *a, const void *b)
{
const dsl_crypto_key_t *dcka = a;
const dsl_crypto_key_t *dckb = b;
if (dcka->dck_obj < dckb->dck_obj)
return (-1);
if (dcka->dck_obj > dckb->dck_obj)
return (1);
return (0);
}
+/*
+ * Check whether two objsets share the same master encryption key,
+ * comparing the zk_guid of each objset's DSL Crypto Key.
+ */
+boolean_t
+dmu_objset_crypto_key_equal(objset_t *osa, objset_t *osb)
+{
+ dsl_crypto_key_t *dcka = NULL;
+ dsl_crypto_key_t *dckb = NULL;
+ uint64_t obja, objb;
+ boolean_t equal;
+ spa_t *spa;
+
+ spa = dmu_objset_spa(osa);
+ if (spa != dmu_objset_spa(osb))
+ return (B_FALSE);
+ obja = dmu_objset_ds(osa)->ds_object;
+ objb = dmu_objset_ds(osb)->ds_object;
+
+ if (spa_keystore_lookup_key(spa, obja, FTAG, &dcka) != 0)
+ return (B_FALSE);
+ if (spa_keystore_lookup_key(spa, objb, FTAG, &dckb) != 0) {
+ spa_keystore_dsl_key_rele(spa, dcka, FTAG);
+ return (B_FALSE);
+ }
+
+ equal = (dcka->dck_key.zk_guid == dckb->dck_key.zk_guid);
+
+ spa_keystore_dsl_key_rele(spa, dcka, FTAG);
+ spa_keystore_dsl_key_rele(spa, dckb, FTAG);
+
+ return (equal);
+}
+
static int
spa_key_mapping_compare(const void *a, const void *b)
{
const dsl_key_mapping_t *kma = a;
const dsl_key_mapping_t *kmb = b;
if (kma->km_dsobj < kmb->km_dsobj)
return (-1);
if (kma->km_dsobj > kmb->km_dsobj)
return (1);
return (0);
}
static int
spa_wkey_compare(const void *a, const void *b)
{
const dsl_wrapping_key_t *wka = a;
const dsl_wrapping_key_t *wkb = b;
if (wka->wk_ddobj < wkb->wk_ddobj)
return (-1);
if (wka->wk_ddobj > wkb->wk_ddobj)
return (1);
return (0);
}
void
spa_keystore_init(spa_keystore_t *sk)
{
rw_init(&sk->sk_dk_lock, NULL, RW_DEFAULT, NULL);
rw_init(&sk->sk_km_lock, NULL, RW_DEFAULT, NULL);
rw_init(&sk->sk_wkeys_lock, NULL, RW_DEFAULT, NULL);
avl_create(&sk->sk_dsl_keys, spa_crypto_key_compare,
sizeof (dsl_crypto_key_t),
offsetof(dsl_crypto_key_t, dck_avl_link));
avl_create(&sk->sk_key_mappings, spa_key_mapping_compare,
sizeof (dsl_key_mapping_t),
offsetof(dsl_key_mapping_t, km_avl_link));
avl_create(&sk->sk_wkeys, spa_wkey_compare, sizeof (dsl_wrapping_key_t),
offsetof(dsl_wrapping_key_t, wk_avl_link));
}
void
spa_keystore_fini(spa_keystore_t *sk)
{
dsl_wrapping_key_t *wkey;
void *cookie = NULL;
ASSERT(avl_is_empty(&sk->sk_dsl_keys));
ASSERT(avl_is_empty(&sk->sk_key_mappings));
while ((wkey = avl_destroy_nodes(&sk->sk_wkeys, &cookie)) != NULL)
dsl_wrapping_key_free(wkey);
avl_destroy(&sk->sk_wkeys);
avl_destroy(&sk->sk_key_mappings);
avl_destroy(&sk->sk_dsl_keys);
rw_destroy(&sk->sk_wkeys_lock);
rw_destroy(&sk->sk_km_lock);
rw_destroy(&sk->sk_dk_lock);
}
static int
dsl_dir_get_encryption_root_ddobj(dsl_dir_t *dd, uint64_t *rddobj)
{
if (dd->dd_crypto_obj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1, rddobj));
}
static int
dsl_dir_get_encryption_version(dsl_dir_t *dd, uint64_t *version)
{
*version = 0;
if (dd->dd_crypto_obj == 0)
return (SET_ERROR(ENOENT));
/* version 0 is implied by ENOENT */
(void) zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_VERSION, 8, 1, version);
return (0);
}
boolean_t
dsl_dir_incompatible_encryption_version(dsl_dir_t *dd)
{
int ret;
uint64_t version = 0;
ret = dsl_dir_get_encryption_version(dd, &version);
if (ret != 0)
return (B_FALSE);
return (version != ZIO_CRYPT_KEY_CURRENT_VERSION);
}
static int
spa_keystore_wkey_hold_ddobj_impl(spa_t *spa, uint64_t ddobj,
const void *tag, dsl_wrapping_key_t **wkey_out)
{
int ret;
dsl_wrapping_key_t search_wkey;
dsl_wrapping_key_t *found_wkey;
ASSERT(RW_LOCK_HELD(&spa->spa_keystore.sk_wkeys_lock));
/* init the search wrapping key */
search_wkey.wk_ddobj = ddobj;
/* lookup the wrapping key */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, &search_wkey, NULL);
if (!found_wkey) {
ret = SET_ERROR(ENOENT);
goto error;
}
/* increment the refcount */
dsl_wrapping_key_hold(found_wkey, tag);
*wkey_out = found_wkey;
return (0);
error:
*wkey_out = NULL;
return (ret);
}
static int
spa_keystore_wkey_hold_dd(spa_t *spa, dsl_dir_t *dd, const void *tag,
dsl_wrapping_key_t **wkey_out)
{
int ret;
dsl_wrapping_key_t *wkey;
uint64_t rddobj;
boolean_t locked = B_FALSE;
if (!RW_WRITE_HELD(&spa->spa_keystore.sk_wkeys_lock)) {
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_READER);
locked = B_TRUE;
}
/* get the ddobj that the keylocation property was inherited from */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto error;
/* lookup the wkey in the avl tree */
ret = spa_keystore_wkey_hold_ddobj_impl(spa, rddobj, tag, &wkey);
if (ret != 0)
goto error;
/* unlock the wkey tree if we locked it */
if (locked)
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
*wkey_out = wkey;
return (0);
error:
if (locked)
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
*wkey_out = NULL;
return (ret);
}
int
dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation)
{
int ret = 0;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
uint64_t rddobj;
/* hold the dsl dir */
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto out;
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto out;
}
/* if dd is not encrypted, the value may only be "none" */
if (dd->dd_crypto_obj == 0) {
if (strcmp(keylocation, "none") != 0) {
ret = SET_ERROR(EACCES);
goto out;
}
ret = 0;
goto out;
}
/* check for a valid keylocation for encrypted datasets */
if (!zfs_prop_valid_keylocation(keylocation, B_TRUE)) {
ret = SET_ERROR(EINVAL);
goto out;
}
/* check that this is an encryption root */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto out;
if (rddobj != dd->dd_object) {
ret = SET_ERROR(EACCES);
goto out;
}
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
return (0);
out:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
static void
dsl_crypto_key_free(dsl_crypto_key_t *dck)
{
ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
/* destroy the zio_crypt_key_t */
zio_crypt_key_destroy(&dck->dck_key);
/* free the refcount, wrapping key, and lock */
zfs_refcount_destroy(&dck->dck_holds);
if (dck->dck_wkey)
dsl_wrapping_key_rele(dck->dck_wkey, dck);
/* free the key */
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
static void
dsl_crypto_key_rele(dsl_crypto_key_t *dck, const void *tag)
{
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0)
dsl_crypto_key_free(dck);
}
static int
dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
uint64_t dckobj, const void *tag, dsl_crypto_key_t **dck_out)
{
int ret;
uint64_t crypt = 0, guid = 0, version = 0;
uint8_t raw_keydata[MASTER_KEY_MAX_LEN];
uint8_t raw_hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
dsl_crypto_key_t *dck;
/* allocate and initialize the key */
dck = kmem_zalloc(sizeof (dsl_crypto_key_t), KM_SLEEP);
/* fetch all of the values we need from the ZAP */
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt);
if (ret != 0)
goto error;
/* handle a future crypto suite that we don't support */
if (crypt >= ZIO_CRYPT_FUNCTIONS) {
ret = SET_ERROR(ZFS_ERR_CRYPTO_NOTSUP);
goto error;
}
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1, &guid);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, raw_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, raw_hmac_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac);
if (ret != 0)
goto error;
/* the initial on-disk format for encryption did not have a version */
(void) zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_VERSION, 8, 1, &version);
/*
* Unwrap the keys. If there is an error return EACCES to indicate
* an authentication failure.
*/
ret = zio_crypt_key_unwrap(&wkey->wk_key, crypt, version, guid,
raw_keydata, raw_hmac_keydata, iv, mac, &dck->dck_key);
if (ret != 0) {
ret = SET_ERROR(EACCES);
goto error;
}
/* finish initializing the dsl_crypto_key_t */
zfs_refcount_create(&dck->dck_holds);
dsl_wrapping_key_hold(wkey, dck);
dck->dck_wkey = wkey;
dck->dck_obj = dckobj;
zfs_refcount_add(&dck->dck_holds, tag);
*dck_out = dck;
return (0);
error:
if (dck != NULL) {
memset(dck, 0, sizeof (dsl_crypto_key_t));
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
*dck_out = NULL;
return (ret);
}
static int
spa_keystore_dsl_key_hold_impl(spa_t *spa, uint64_t dckobj, const void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
dsl_crypto_key_t search_dck;
dsl_crypto_key_t *found_dck;
ASSERT(RW_LOCK_HELD(&spa->spa_keystore.sk_dk_lock));
/* init the search key */
search_dck.dck_obj = dckobj;
/* find the matching key in the keystore */
found_dck = avl_find(&spa->spa_keystore.sk_dsl_keys, &search_dck, NULL);
if (!found_dck) {
ret = SET_ERROR(ENOENT);
goto error;
}
/* increment the refcount */
zfs_refcount_add(&found_dck->dck_holds, tag);
*dck_out = found_dck;
return (0);
error:
*dck_out = NULL;
return (ret);
}
static int
spa_keystore_dsl_key_hold_dd(spa_t *spa, dsl_dir_t *dd, const void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
avl_index_t where;
dsl_crypto_key_t *dck_io = NULL, *dck_ks = NULL;
dsl_wrapping_key_t *wkey = NULL;
uint64_t dckobj = dd->dd_crypto_obj;
/* Lookup the key in the tree of currently loaded keys */
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_READER);
ret = spa_keystore_dsl_key_hold_impl(spa, dckobj, tag, &dck_ks);
rw_exit(&spa->spa_keystore.sk_dk_lock);
if (ret == 0) {
*dck_out = dck_ks;
return (0);
}
/* Lookup the wrapping key from the keystore */
ret = spa_keystore_wkey_hold_dd(spa, dd, FTAG, &wkey);
if (ret != 0) {
*dck_out = NULL;
return (SET_ERROR(EACCES));
}
/* Read the key from disk */
ret = dsl_crypto_key_open(spa->spa_meta_objset, wkey, dckobj,
tag, &dck_io);
if (ret != 0) {
dsl_wrapping_key_rele(wkey, FTAG);
*dck_out = NULL;
return (ret);
}
/*
* Add the key to the keystore. It may already exist if it was
* added while performing the read from disk. In this case discard
* it and return the key from the keystore.
*/
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_WRITER);
ret = spa_keystore_dsl_key_hold_impl(spa, dckobj, tag, &dck_ks);
if (ret != 0) {
avl_find(&spa->spa_keystore.sk_dsl_keys, dck_io, &where);
avl_insert(&spa->spa_keystore.sk_dsl_keys, dck_io, where);
*dck_out = dck_io;
} else {
dsl_crypto_key_free(dck_io);
*dck_out = dck_ks;
}
/* Release the wrapping key (the dsl key now has a reference to it) */
dsl_wrapping_key_rele(wkey, FTAG);
rw_exit(&spa->spa_keystore.sk_dk_lock);
return (0);
}
void
spa_keystore_dsl_key_rele(spa_t *spa, dsl_crypto_key_t *dck, const void *tag)
{
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_WRITER);
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0) {
avl_remove(&spa->spa_keystore.sk_dsl_keys, dck);
dsl_crypto_key_free(dck);
}
rw_exit(&spa->spa_keystore.sk_dk_lock);
}
int
spa_keystore_load_wkey_impl(spa_t *spa, dsl_wrapping_key_t *wkey)
{
int ret;
avl_index_t where;
dsl_wrapping_key_t *found_wkey;
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* insert the wrapping key into the keystore */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, wkey, &where);
if (found_wkey != NULL) {
ret = SET_ERROR(EEXIST);
goto error_unlock;
}
avl_insert(&spa->spa_keystore.sk_wkeys, wkey, where);
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (ret);
}
int
spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp,
boolean_t noop)
{
int ret;
dsl_dir_t *dd = NULL;
dsl_crypto_key_t *dck = NULL;
dsl_wrapping_key_t *wkey = dcp->cp_wkey;
dsl_pool_t *dp = NULL;
uint64_t rddobj, keyformat, salt, iters;
/*
* We don't validate the wrapping key's keyformat, salt, or iters
* since they will never be needed after the DCK has been wrapped.
*/
if (dcp->cp_wkey == NULL ||
dcp->cp_cmd != DCP_CMD_NONE ||
dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL)
return (SET_ERROR(EINVAL));
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto error;
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = SET_ERROR(ENOTSUP);
goto error;
}
/* hold the dsl dir */
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* confirm that dd is the encryption root */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0 || rddobj != dd->dd_object) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* initialize the wkey's ddobj */
wkey->wk_ddobj = dd->dd_object;
/* verify that the wkey is correct by opening its dsl key */
ret = dsl_crypto_key_open(dp->dp_meta_objset, wkey,
dd->dd_crypto_obj, FTAG, &dck);
if (ret != 0)
goto error;
/* initialize the wkey encryption parameters from the DSL Crypto Key */
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &keyformat);
if (ret != 0)
goto error;
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
if (ret != 0)
goto error;
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
if (ret != 0)
goto error;
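/*
 * Passphrase-format keys must carry a PBKDF2 salt and iteration
 * count; raw and hex keys must not.
 */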
ASSERT3U(keyformat, <, ZFS_KEYFORMAT_FORMATS);
ASSERT3U(keyformat, !=, ZFS_KEYFORMAT_NONE);
IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, iters != 0);
IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, salt != 0);
IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, iters == 0);
IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, salt == 0);
wkey->wk_keyformat = keyformat;
wkey->wk_salt = salt;
wkey->wk_iters = iters;
/*
* At this point we have verified the wkey and confirmed that it can
* be used to decrypt a DSL Crypto Key. We can simply cleanup and
* return if this is all the user wanted to do.
*/
if (noop)
goto error;
/* insert the wrapping key into the keystore */
ret = spa_keystore_load_wkey_impl(dp->dp_spa, wkey);
if (ret != 0)
goto error;
dsl_crypto_key_rele(dck, FTAG);
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
/* create any zvols under this ds */
zvol_create_minors_recursive(dsname);
return (0);
error:
if (dck != NULL)
dsl_crypto_key_rele(dck, FTAG);
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
int
spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj)
{
int ret;
dsl_wrapping_key_t search_wkey;
dsl_wrapping_key_t *found_wkey;
/* init the search wrapping key */
search_wkey.wk_ddobj = ddobj;
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* remove the wrapping key from the keystore */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys,
&search_wkey, NULL);
if (found_wkey == NULL) {
ret = SET_ERROR(EACCES);
goto error_unlock;
} else if (zfs_refcount_count(&found_wkey->wk_refcnt) != 0) {
ret = SET_ERROR(EBUSY);
goto error_unlock;
}
avl_remove(&spa->spa_keystore.sk_wkeys, found_wkey);
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
/* free the wrapping key */
dsl_wrapping_key_free(found_wkey);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (ret);
}
int
spa_keystore_unload_wkey(const char *dsname)
{
int ret = 0;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
spa_t *spa = NULL;
ret = spa_open(dsname, &spa, FTAG);
if (ret != 0)
return (ret);
/*
* Wait for any outstanding txg IO to complete, releasing any
* remaining references on the wkey.
*/
if (spa_mode(spa) != SPA_MODE_READ)
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_close(spa, FTAG);
/* hold the dsl dir */
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto error;
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = SET_ERROR(ENOTSUP);
goto error;
}
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* unload the wkey */
ret = spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
if (ret != 0)
goto error;
dsl_dir_rele(dd, FTAG);
/* remove any zvols under this ds while the pool is still held */
zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
dsl_pool_rele(dp, FTAG);
return (0);
error:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
void
key_mapping_add_ref(dsl_key_mapping_t *km, const void *tag)
{
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
zfs_refcount_add(&km->km_refcnt, tag);
}
/*
* The locking here is a little tricky to ensure we don't cause unnecessary
* performance problems. We want to release a key mapping whenever someone
* decrements the refcount to 0, but freeing the mapping requires removing
* it from the spa_keystore, which requires holding sk_km_lock as a writer.
* Most of the time we don't want to hold this lock as a writer, since the
* same lock is held as a reader for each IO that needs to encrypt / decrypt
* data for any dataset and in practice we will only actually free the
* mapping after unmounting a dataset.
*/
void
key_mapping_rele(spa_t *spa, dsl_key_mapping_t *km, const void *tag)
{
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
if (zfs_refcount_remove(&km->km_refcnt, tag) != 0)
return;
/*
* We think we are going to need to free the mapping. Add a
* reference to prevent most other releasers from thinking
* this might be their responsibility. This is inherently
* racy, so we will confirm that we are legitimately the
* last holder once we have the sk_km_lock as a writer.
*/
zfs_refcount_add(&km->km_refcnt, FTAG);
rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER);
if (zfs_refcount_remove(&km->km_refcnt, FTAG) != 0) {
rw_exit(&spa->spa_keystore.sk_km_lock);
return;
}
avl_remove(&spa->spa_keystore.sk_key_mappings, km);
rw_exit(&spa->spa_keystore.sk_km_lock);
spa_keystore_dsl_key_rele(spa, km->km_key, km);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
}
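/*
 * The same "claim, lock, re-check" release pattern generalizes to any
 * refcounted object kept in a tree behind a mostly-read lock. A minimal
 * sketch, with hypothetical names (obj, obj_refcnt, tree_lock, tree and
 * obj_free) standing in for the key-mapping specifics above:
 *
 *	if (zfs_refcount_remove(&obj->obj_refcnt, tag) != 0)
 *		return;				// not the last holder
 *	zfs_refcount_add(&obj->obj_refcnt, FTAG);	// claim the free
 *	rw_enter(&tree_lock, RW_WRITER);
 *	if (zfs_refcount_remove(&obj->obj_refcnt, FTAG) != 0) {
 *		rw_exit(&tree_lock);		// lost the race; bail
 *		return;
 *	}
 *	avl_remove(&tree, obj);			// confirmed last holder
 *	rw_exit(&tree_lock);
 *	obj_free(obj);
 */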
int
spa_keystore_create_mapping(spa_t *spa, dsl_dataset_t *ds, const void *tag,
dsl_key_mapping_t **km_out)
{
int ret;
avl_index_t where;
dsl_key_mapping_t *km, *found_km;
boolean_t should_free = B_FALSE;
/* Allocate and initialize the mapping */
km = kmem_zalloc(sizeof (dsl_key_mapping_t), KM_SLEEP);
zfs_refcount_create(&km->km_refcnt);
ret = spa_keystore_dsl_key_hold_dd(spa, ds->ds_dir, km, &km->km_key);
if (ret != 0) {
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
if (km_out != NULL)
*km_out = NULL;
return (ret);
}
km->km_dsobj = ds->ds_object;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER);
/*
* If a mapping already exists, simply increment its refcount and
* clean up the one we made. We want to allocate / free outside of
* the lock because this lock is also used by the zio layer to lookup
* key mappings. Otherwise, use the one we created. Normally, there will
* only be one active reference at a time (the objset owner), but there
* are times when there could be multiple async users.
*/
found_km = avl_find(&spa->spa_keystore.sk_key_mappings, km, &where);
if (found_km != NULL) {
should_free = B_TRUE;
zfs_refcount_add(&found_km->km_refcnt, tag);
if (km_out != NULL)
*km_out = found_km;
} else {
zfs_refcount_add(&km->km_refcnt, tag);
avl_insert(&spa->spa_keystore.sk_key_mappings, km, where);
if (km_out != NULL)
*km_out = km;
}
rw_exit(&spa->spa_keystore.sk_km_lock);
if (should_free) {
spa_keystore_dsl_key_rele(spa, km->km_key, km);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
}
return (0);
}
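/*
 * Typical usage (hypothetical caller, error handling elided): a mapping
 * is created when a dataset's objset is owned and removed when it is
 * disowned, so that the zio layer can look the key up by dsobj while
 * I/O is in flight:
 *
 *	VERIFY0(spa_keystore_create_mapping(spa, ds, FTAG, NULL));
 *	... issue encrypted I/O against ds ...
 *	VERIFY0(spa_keystore_remove_mapping(spa, ds->ds_object, FTAG));
 */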
int
spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, const void *tag)
{
int ret;
dsl_key_mapping_t search_km;
dsl_key_mapping_t *found_km;
/* init the search key mapping */
search_km.km_dsobj = dsobj;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_READER);
/* find the matching mapping */
found_km = avl_find(&spa->spa_keystore.sk_key_mappings,
&search_km, NULL);
if (found_km == NULL) {
ret = SET_ERROR(ENOENT);
goto error_unlock;
}
rw_exit(&spa->spa_keystore.sk_km_lock);
key_mapping_rele(spa, found_km, tag);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_km_lock);
return (ret);
}
/*
* This function is primarily used by the zio and arc layer to lookup
* DSL Crypto Keys for encryption. Callers must release the key with
* spa_keystore_dsl_key_rele(). The function may also be called with
* dck_out == NULL and tag == NULL to simply check that a key exists
* without getting a reference to it.
*/
int
spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, const void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
dsl_key_mapping_t search_km;
dsl_key_mapping_t *found_km;
ASSERT((tag != NULL && dck_out != NULL) ||
(tag == NULL && dck_out == NULL));
/* init the search key mapping */
search_km.km_dsobj = dsobj;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_READER);
/* find the matching mapping in the tree */
found_km = avl_find(&spa->spa_keystore.sk_key_mappings, &search_km,
NULL);
if (found_km == NULL) {
ret = SET_ERROR(ENOENT);
goto error_unlock;
}
if (tag != NULL)
zfs_refcount_add(&found_km->km_key->dck_holds, tag);
rw_exit(&spa->spa_keystore.sk_km_lock);
if (dck_out != NULL)
*dck_out = found_km->km_key;
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_km_lock);
if (dck_out != NULL)
*dck_out = NULL;
return (ret);
}
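/*
 * Both calling conventions accepted above, sketched for a hypothetical
 * caller (dsobj is assumed to name an encrypted dataset):
 *
 *	// existence check only: no tag, no reference taken
 *	if (spa_keystore_lookup_key(spa, dsobj, NULL, NULL) != 0)
 *		return (SET_ERROR(EACCES));
 *
 *	// lookup with a hold, which must be paired with a rele
 *	dsl_crypto_key_t *dck;
 *	VERIFY0(spa_keystore_lookup_key(spa, dsobj, FTAG, &dck));
 *	... use dck->dck_key for encryption / decryption ...
 *	spa_keystore_dsl_key_rele(spa, dck, FTAG);
 */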
static int
dmu_objset_check_wkey_loaded(dsl_dir_t *dd)
{
int ret;
dsl_wrapping_key_t *wkey = NULL;
ret = spa_keystore_wkey_hold_dd(dd->dd_pool->dp_spa, dd, FTAG,
&wkey);
if (ret != 0)
return (SET_ERROR(EACCES));
dsl_wrapping_key_rele(wkey, FTAG);
return (0);
}
zfs_keystatus_t
dsl_dataset_get_keystatus(dsl_dir_t *dd)
{
/* check if this dd has a dsl key */
if (dd->dd_crypto_obj == 0)
return (ZFS_KEYSTATUS_NONE);
return (dmu_objset_check_wkey_loaded(dd) == 0 ?
ZFS_KEYSTATUS_AVAILABLE : ZFS_KEYSTATUS_UNAVAILABLE);
}
static int
dsl_dir_get_crypt(dsl_dir_t *dd, uint64_t *crypt)
{
if (dd->dd_crypto_obj == 0) {
*crypt = ZIO_CRYPT_OFF;
return (0);
}
return (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1, crypt));
}
static void
dsl_crypto_key_sync_impl(objset_t *mos, uint64_t dckobj, uint64_t crypt,
uint64_t root_ddobj, uint64_t guid, uint8_t *iv, uint8_t *mac,
uint8_t *keydata, uint8_t *hmac_keydata, uint64_t keyformat,
uint64_t salt, uint64_t iters, dmu_tx_t *tx)
{
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1,
&root_ddobj, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1,
&guid, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, keydata, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, hmac_keydata, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
8, 1, &keyformat, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
8, 1, &salt, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
8, 1, &iters, tx));
}
static void
dsl_crypto_key_sync(dsl_crypto_key_t *dck, dmu_tx_t *tx)
{
zio_crypt_key_t *key = &dck->dck_key;
dsl_wrapping_key_t *wkey = dck->dck_wkey;
uint8_t keydata[MASTER_KEY_MAX_LEN];
uint8_t hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(key->zk_crypt, <, ZIO_CRYPT_FUNCTIONS);
/* encrypt and store the keys along with the IV and MAC */
VERIFY0(zio_crypt_key_wrap(&dck->dck_wkey->wk_key, key, iv, mac,
keydata, hmac_keydata));
/* update the ZAP with the obtained values */
dsl_crypto_key_sync_impl(tx->tx_pool->dp_meta_objset, dck->dck_obj,
key->zk_crypt, wkey->wk_ddobj, key->zk_guid, iv, mac, keydata,
hmac_keydata, wkey->wk_keyformat, wkey->wk_salt, wkey->wk_iters,
tx);
}
typedef struct spa_keystore_change_key_args {
const char *skcka_dsname;
dsl_crypto_params_t *skcka_cp;
} spa_keystore_change_key_args_t;
static int
spa_keystore_change_key_check(void *arg, dmu_tx_t *tx)
{
int ret;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_keystore_change_key_args_t *skcka = arg;
dsl_crypto_params_t *dcp = skcka->skcka_cp;
uint64_t rddobj;
/* check for the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = SET_ERROR(ENOTSUP);
goto error;
}
/* check for valid key change command */
if (dcp->cp_cmd != DCP_CMD_NEW_KEY &&
dcp->cp_cmd != DCP_CMD_INHERIT &&
dcp->cp_cmd != DCP_CMD_FORCE_NEW_KEY &&
dcp->cp_cmd != DCP_CMD_FORCE_INHERIT) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* hold the dd */
ret = dsl_dir_hold(dp, skcka->skcka_dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* verify that the dataset is encrypted */
if (dd->dd_crypto_obj == 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* clones must always use their origin's key */
if (dsl_dir_is_clone(dd)) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* lookup the ddobj we are inheriting the keylocation from */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto error;
/* Handle inheritance */
if (dcp->cp_cmd == DCP_CMD_INHERIT ||
dcp->cp_cmd == DCP_CMD_FORCE_INHERIT) {
/* no other encryption params should be given */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL ||
dcp->cp_wkey != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that this is an encryption root */
if (dd->dd_object != rddobj) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that the parent is encrypted */
if (dd->dd_parent->dd_crypto_obj == 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* if we are rewrapping check that both keys are loaded */
if (dcp->cp_cmd == DCP_CMD_INHERIT) {
ret = dmu_objset_check_wkey_loaded(dd);
if (ret != 0)
goto error;
ret = dmu_objset_check_wkey_loaded(dd->dd_parent);
if (ret != 0)
goto error;
}
dsl_dir_rele(dd, FTAG);
return (0);
}
/* handle forcing an encryption root without rewrapping */
if (dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY) {
/* no other encryption params should be given */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL ||
dcp->cp_wkey != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that this is not an encryption root */
if (dd->dd_object == rddobj) {
ret = SET_ERROR(EINVAL);
goto error;
}
dsl_dir_rele(dd, FTAG);
return (0);
}
/* crypt cannot be changed after creation */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* we are not inheriting our parent's wkey so we need one ourselves */
if (dcp->cp_wkey == NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check for a valid keyformat for the new wrapping key */
if (dcp->cp_wkey->wk_keyformat >= ZFS_KEYFORMAT_FORMATS ||
dcp->cp_wkey->wk_keyformat == ZFS_KEYFORMAT_NONE) {
ret = SET_ERROR(EINVAL);
goto error;
}
/*
* If this dataset is not currently an encryption root we need a new
* keylocation for this dataset's new wrapping key. Otherwise we can
* just keep the one we already had.
*/
if (dd->dd_object != rddobj && dcp->cp_keylocation == NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that the keylocation is valid if it is not NULL */
if (dcp->cp_keylocation != NULL &&
!zfs_prop_valid_keylocation(dcp->cp_keylocation, B_TRUE)) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* passphrases require pbkdf2 salt and iters */
if (dcp->cp_wkey->wk_keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
if (dcp->cp_wkey->wk_salt == 0 ||
dcp->cp_wkey->wk_iters < MIN_PBKDF2_ITERATIONS) {
ret = SET_ERROR(EINVAL);
goto error;
}
} else {
if (dcp->cp_wkey->wk_salt != 0 || dcp->cp_wkey->wk_iters != 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
}
/* make sure the dd's wkey is loaded */
ret = dmu_objset_check_wkey_loaded(dd);
if (ret != 0)
goto error;
dsl_dir_rele(dd, FTAG);
return (0);
error:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
return (ret);
}
/*
* This function deals with the intricacies of updating wrapping
* key references and encryption roots recursively in the event
* of a call to 'zfs change-key' or 'zfs promote'. The 'skip'
* parameter should always be set to B_FALSE when called
* externally.
*/
static void
spa_keystore_change_key_sync_impl(uint64_t rddobj, uint64_t ddobj,
uint64_t new_rddobj, dsl_wrapping_key_t *wkey, boolean_t skip,
dmu_tx_t *tx)
{
int ret;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd = NULL;
dsl_crypto_key_t *dck = NULL;
uint64_t curr_rddobj;
ASSERT(RW_WRITE_HELD(&dp->dp_spa->spa_keystore.sk_wkeys_lock));
/* hold the dd */
VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
/* ignore special dsl dirs */
if (dd->dd_myname[0] == '$' || dd->dd_myname[0] == '%') {
dsl_dir_rele(dd, FTAG);
return;
}
ret = dsl_dir_get_encryption_root_ddobj(dd, &curr_rddobj);
VERIFY(ret == 0 || ret == ENOENT);
/*
* Stop recursing if this dsl dir didn't inherit from the root
* or if this dd is a clone.
*/
if (ret == ENOENT ||
(!skip && (curr_rddobj != rddobj || dsl_dir_is_clone(dd)))) {
dsl_dir_rele(dd, FTAG);
return;
}
/*
* If we don't have a wrapping key just update the dck to reflect the
* new encryption root. Otherwise rewrap the entire dck and re-sync it
* to disk. If skip is set, we don't do any of this work.
*/
if (!skip) {
if (wkey == NULL) {
VERIFY0(zap_update(dp->dp_meta_objset,
dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1,
&new_rddobj, tx));
} else {
VERIFY0(spa_keystore_dsl_key_hold_dd(dp->dp_spa, dd,
FTAG, &dck));
dsl_wrapping_key_hold(wkey, dck);
dsl_wrapping_key_rele(dck->dck_wkey, dck);
dck->dck_wkey = wkey;
dsl_crypto_key_sync(dck, tx);
spa_keystore_dsl_key_rele(dp->dp_spa, dck, FTAG);
}
}
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Recurse into all child dsl dirs. */
for (zap_cursor_init(zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
spa_keystore_change_key_sync_impl(rddobj,
za->za_first_integer, new_rddobj, wkey, B_FALSE, tx);
}
zap_cursor_fini(zc);
/*
* Recurse into all dsl dirs of clones. We utilize the skip parameter
* here so that we don't attempt to process the clones directly. This
* is because the clone and its origin share the same dck, which has
* already been updated.
*/
for (zap_cursor_init(zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
VERIFY0(dsl_dataset_hold_obj(dp, za->za_first_integer,
FTAG, &clone));
spa_keystore_change_key_sync_impl(rddobj,
clone->ds_dir->dd_object, new_rddobj, wkey, B_TRUE, tx);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
dsl_dir_rele(dd, FTAG);
}
static void
spa_keystore_change_key_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_t *ds;
avl_index_t where;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
spa_keystore_change_key_args_t *skcka = arg;
dsl_crypto_params_t *dcp = skcka->skcka_cp;
dsl_wrapping_key_t *wkey = NULL, *found_wkey;
dsl_wrapping_key_t wkey_search;
const char *keylocation = dcp->cp_keylocation;
uint64_t rddobj, new_rddobj;
/* create and initialize the wrapping key */
VERIFY0(dsl_dataset_hold(dp, skcka->skcka_dsname, FTAG, &ds));
ASSERT(!ds->ds_is_snapshot);
if (dcp->cp_cmd == DCP_CMD_NEW_KEY ||
dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY) {
/*
* We are changing to a new wkey. Set additional properties
* which can be sent along with this ioctl. Note that this
* command can set keylocation even if it can't normally be
* set via 'zfs set' due to a non-local keylocation.
*/
if (dcp->cp_cmd == DCP_CMD_NEW_KEY) {
wkey = dcp->cp_wkey;
wkey->wk_ddobj = ds->ds_dir->dd_object;
} else {
keylocation = "prompt";
}
if (keylocation != NULL) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1,
keylocation, tx);
}
VERIFY0(dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj));
new_rddobj = ds->ds_dir->dd_object;
} else {
/*
* We are inheriting the parent's wkey. Unset any local
* keylocation and grab a reference to the wkey.
*/
if (dcp->cp_cmd == DCP_CMD_INHERIT) {
VERIFY0(spa_keystore_wkey_hold_dd(spa,
ds->ds_dir->dd_parent, FTAG, &wkey));
}
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), ZPROP_SRC_NONE,
0, 0, NULL, tx);
rddobj = ds->ds_dir->dd_object;
VERIFY0(dsl_dir_get_encryption_root_ddobj(ds->ds_dir->dd_parent,
&new_rddobj));
}
if (wkey == NULL) {
ASSERT(dcp->cp_cmd == DCP_CMD_FORCE_INHERIT ||
dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY);
}
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* recurse through all children and rewrap their keys */
spa_keystore_change_key_sync_impl(rddobj, ds->ds_dir->dd_object,
new_rddobj, wkey, B_FALSE, tx);
/*
* All references to the old wkey should be released now (if it
* existed). Replace the wrapping key.
*/
wkey_search.wk_ddobj = ds->ds_dir->dd_object;
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, &wkey_search, NULL);
if (found_wkey != NULL) {
ASSERT0(zfs_refcount_count(&found_wkey->wk_refcnt));
avl_remove(&spa->spa_keystore.sk_wkeys, found_wkey);
dsl_wrapping_key_free(found_wkey);
}
if (dcp->cp_cmd == DCP_CMD_NEW_KEY) {
avl_find(&spa->spa_keystore.sk_wkeys, wkey, &where);
avl_insert(&spa->spa_keystore.sk_wkeys, wkey, where);
} else if (wkey != NULL) {
dsl_wrapping_key_rele(wkey, FTAG);
}
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
dsl_dataset_rele(ds, FTAG);
}
int
spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp)
{
spa_keystore_change_key_args_t skcka;
/* initialize the args struct */
skcka.skcka_dsname = dsname;
skcka.skcka_cp = dcp;
/*
* Perform the actual work in syncing context. The blocks modified
* here could be calculated but it would require holding the pool
* lock and traversing all of the datasets that will have their keys
* changed.
*/
return (dsl_sync_task(dsname, spa_keystore_change_key_check,
spa_keystore_change_key_sync, &skcka, 15,
ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_rename_crypt_check(dsl_dir_t *dd, dsl_dir_t *newparent)
{
int ret;
uint64_t curr_rddobj, parent_rddobj;
if (dd->dd_crypto_obj == 0)
return (0);
ret = dsl_dir_get_encryption_root_ddobj(dd, &curr_rddobj);
if (ret != 0)
goto error;
/*
* if this is not an encryption root, we must make sure we are not
* moving dd to a new encryption root
*/
if (dd->dd_object != curr_rddobj) {
ret = dsl_dir_get_encryption_root_ddobj(newparent,
&parent_rddobj);
if (ret != 0)
goto error;
if (parent_rddobj != curr_rddobj) {
ret = SET_ERROR(EACCES);
goto error;
}
}
return (0);
error:
return (ret);
}
/*
* Check to make sure that a promote from targetdd to origindd will not require
* any key rewraps.
*/
int
dsl_dataset_promote_crypt_check(dsl_dir_t *target, dsl_dir_t *origin)
{
int ret;
uint64_t rddobj, op_rddobj, tp_rddobj;
/* If the dataset is not encrypted we don't need to check anything */
if (origin->dd_crypto_obj == 0)
return (0);
/*
* If we are not changing the first origin snapshot in a chain
* the encryption root won't change either.
*/
if (dsl_dir_is_clone(origin))
return (0);
/*
* If the origin is the encryption root we will update
* the DSL Crypto Key to point to the target instead.
*/
ret = dsl_dir_get_encryption_root_ddobj(origin, &rddobj);
if (ret != 0)
return (ret);
if (rddobj == origin->dd_object)
return (0);
/*
* The origin is inheriting its encryption root from its parent.
* Check that the parent of the target has the same encryption root.
*/
ret = dsl_dir_get_encryption_root_ddobj(origin->dd_parent, &op_rddobj);
if (ret == ENOENT)
return (SET_ERROR(EACCES));
else if (ret != 0)
return (ret);
ret = dsl_dir_get_encryption_root_ddobj(target->dd_parent, &tp_rddobj);
if (ret == ENOENT)
return (SET_ERROR(EACCES));
else if (ret != 0)
return (ret);
if (op_rddobj != tp_rddobj)
return (SET_ERROR(EACCES));
return (0);
}
void
dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin,
dmu_tx_t *tx)
{
uint64_t rddobj;
dsl_pool_t *dp = target->dd_pool;
dsl_dataset_t *targetds;
dsl_dataset_t *originds;
char *keylocation;
if (origin->dd_crypto_obj == 0)
return;
if (dsl_dir_is_clone(origin))
return;
VERIFY0(dsl_dir_get_encryption_root_ddobj(origin, &rddobj));
if (rddobj != origin->dd_object)
return;
/*
* If the target is being promoted to the encryption root update the
* DSL Crypto Key and keylocation to reflect that. We also need to
* update the DSL Crypto Keys of all children inheriting their
* encryption root to point to the new target. Otherwise, the check
* function ensured that the encryption root will not change.
*/
keylocation = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(target)->dd_head_dataset_obj, FTAG, &targetds));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin)->dd_head_dataset_obj, FTAG, &originds));
VERIFY0(dsl_prop_get_dd(origin, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
1, ZAP_MAXVALUELEN, keylocation, NULL, B_FALSE));
dsl_prop_set_sync_impl(targetds, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1, keylocation, tx);
dsl_prop_set_sync_impl(originds, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_NONE, 0, 0, NULL, tx);
rw_enter(&dp->dp_spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
spa_keystore_change_key_sync_impl(rddobj, origin->dd_object,
target->dd_object, NULL, B_FALSE, tx);
rw_exit(&dp->dp_spa->spa_keystore.sk_wkeys_lock);
dsl_dataset_rele(targetds, FTAG);
dsl_dataset_rele(originds, FTAG);
kmem_free(keylocation, ZAP_MAXVALUELEN);
}
int
dmu_objset_create_crypt_check(dsl_dir_t *parentdd, dsl_crypto_params_t *dcp,
boolean_t *will_encrypt)
{
int ret;
uint64_t pcrypt, crypt;
dsl_crypto_params_t dummy_dcp = { 0 };
if (will_encrypt != NULL)
*will_encrypt = B_FALSE;
if (dcp == NULL)
dcp = &dummy_dcp;
if (dcp->cp_cmd != DCP_CMD_NONE)
return (SET_ERROR(EINVAL));
if (parentdd != NULL) {
ret = dsl_dir_get_crypt(parentdd, &pcrypt);
if (ret != 0)
return (ret);
} else {
pcrypt = ZIO_CRYPT_OFF;
}
crypt = (dcp->cp_crypt == ZIO_CRYPT_INHERIT) ? pcrypt : dcp->cp_crypt;
ASSERT3U(pcrypt, !=, ZIO_CRYPT_INHERIT);
ASSERT3U(crypt, !=, ZIO_CRYPT_INHERIT);
/* check for valid dcp with no encryption (inherited or local) */
if (crypt == ZIO_CRYPT_OFF) {
/* Must not specify encryption params */
if (dcp->cp_wkey != NULL ||
(dcp->cp_keylocation != NULL &&
strcmp(dcp->cp_keylocation, "none") != 0))
return (SET_ERROR(EINVAL));
return (0);
}
if (will_encrypt != NULL)
*will_encrypt = B_TRUE;
/*
* We will now definitely be encrypting. Check the feature flag. When
* creating the pool the caller will check this for us since we won't
* technically have the feature activated yet.
*/
if (parentdd != NULL &&
!spa_feature_is_enabled(parentdd->dd_pool->dp_spa,
SPA_FEATURE_ENCRYPTION)) {
return (SET_ERROR(EOPNOTSUPP));
}
/* Check for errata #4 (encryption enabled, bookmark_v2 disabled) */
if (parentdd != NULL &&
!spa_feature_is_enabled(parentdd->dd_pool->dp_spa,
SPA_FEATURE_BOOKMARK_V2)) {
return (SET_ERROR(EOPNOTSUPP));
}
/* handle inheritance */
if (dcp->cp_wkey == NULL) {
ASSERT3P(parentdd, !=, NULL);
/* key must be fully unspecified */
if (dcp->cp_keylocation != NULL)
return (SET_ERROR(EINVAL));
/* parent must have a key to inherit */
if (pcrypt == ZIO_CRYPT_OFF)
return (SET_ERROR(EINVAL));
/* check for parent key */
ret = dmu_objset_check_wkey_loaded(parentdd);
if (ret != 0)
return (ret);
return (0);
}
/* At this point we should have a fully specified key. Check location */
if (dcp->cp_keylocation == NULL ||
!zfs_prop_valid_keylocation(dcp->cp_keylocation, B_TRUE))
return (SET_ERROR(EINVAL));
/* Must have fully specified keyformat */
switch (dcp->cp_wkey->wk_keyformat) {
case ZFS_KEYFORMAT_HEX:
case ZFS_KEYFORMAT_RAW:
/* requires no pbkdf2 iters and salt */
if (dcp->cp_wkey->wk_salt != 0 || dcp->cp_wkey->wk_iters != 0)
return (SET_ERROR(EINVAL));
break;
case ZFS_KEYFORMAT_PASSPHRASE:
/* requires pbkdf2 iters and salt */
if (dcp->cp_wkey->wk_salt == 0 ||
dcp->cp_wkey->wk_iters < MIN_PBKDF2_ITERATIONS)
return (SET_ERROR(EINVAL));
break;
case ZFS_KEYFORMAT_NONE:
default:
/* keyformat must be specified and valid */
return (SET_ERROR(EINVAL));
}
return (0);
}
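/*
 * Informal summary of the dcp combinations accepted above:
 *
 *	effective crypt off:   no wkey; keylocation NULL or "none"
 *	crypt on, wkey NULL:   inherit; keylocation must be NULL and the
 *	                       parent must have a loaded key
 *	crypt on, wkey given:  keylocation must be present and valid;
 *	                       passphrase keys require salt and iters,
 *	                       hex/raw keys must specify neither
 */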
void
dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
dsl_dataset_t *origin, dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
uint64_t crypt;
dsl_wrapping_key_t *wkey;
/* clones always use their origin's wrapping key */
if (dsl_dir_is_clone(dd)) {
ASSERT3P(dcp, ==, NULL);
/*
* If this is an encrypted clone we just need to clone the
* dck into dd. Zapify the dd so we can do that.
*/
if (origin->ds_dir->dd_crypto_obj != 0) {
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
dd->dd_crypto_obj =
dsl_crypto_key_clone_sync(origin->ds_dir, tx);
VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_CRYPTO_KEY_OBJ, sizeof (uint64_t), 1,
&dd->dd_crypto_obj, tx));
}
return;
}
/*
* A NULL dcp at this point indicates this is the origin dataset
* which does not have an objset to encrypt. Raw receives will handle
* encryption separately later. In both cases we can simply return.
*/
if (dcp == NULL || dcp->cp_cmd == DCP_CMD_RAW_RECV)
return;
crypt = dcp->cp_crypt;
wkey = dcp->cp_wkey;
/* figure out the effective crypt */
if (crypt == ZIO_CRYPT_INHERIT && dd->dd_parent != NULL)
VERIFY0(dsl_dir_get_crypt(dd->dd_parent, &crypt));
/* if we aren't doing encryption just return */
if (crypt == ZIO_CRYPT_OFF || crypt == ZIO_CRYPT_INHERIT)
return;
/* zapify the dd so that we can add the crypto key obj to it */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
/* use the new key if given or inherit from the parent */
if (wkey == NULL) {
VERIFY0(spa_keystore_wkey_hold_dd(dp->dp_spa,
dd->dd_parent, FTAG, &wkey));
} else {
wkey->wk_ddobj = dd->dd_object;
}
ASSERT3P(wkey, !=, NULL);
/* Create or clone the DSL crypto key and activate the feature */
dd->dd_crypto_obj = dsl_crypto_key_create_sync(crypt, wkey, tx);
VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_CRYPTO_KEY_OBJ, sizeof (uint64_t), 1, &dd->dd_crypto_obj,
tx));
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_ENCRYPTION,
(void *)B_TRUE, tx);
/*
* If we inherited the wrapping key we release our reference now.
* Otherwise, this is a new key and we need to load it into the
* keystore.
*/
if (dcp->cp_wkey == NULL) {
dsl_wrapping_key_rele(wkey, FTAG);
} else {
VERIFY0(spa_keystore_load_wkey_impl(dp->dp_spa, wkey));
}
}
typedef struct dsl_crypto_recv_key_arg {
uint64_t dcrka_dsobj;
uint64_t dcrka_fromobj;
dmu_objset_type_t dcrka_ostype;
nvlist_t *dcrka_nvl;
boolean_t dcrka_do_key;
} dsl_crypto_recv_key_arg_t;
static int
dsl_crypto_recv_raw_objset_check(dsl_dataset_t *ds, dsl_dataset_t *fromds,
dmu_objset_type_t ostype, nvlist_t *nvl, dmu_tx_t *tx)
{
int ret;
objset_t *os;
dnode_t *mdn;
uint8_t *buf = NULL;
uint_t len;
uint64_t intval, nlevels, blksz, ibs;
uint64_t nblkptr, maxblkid;
if (ostype != DMU_OST_ZFS && ostype != DMU_OST_ZVOL)
return (SET_ERROR(EINVAL));
/* raw receives also need info about the structure of the metadnode */
ret = nvlist_lookup_uint64(nvl, "mdn_compress", &intval);
if (ret != 0 || intval >= ZIO_COMPRESS_LEGACY_FUNCTIONS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_checksum", &intval);
if (ret != 0 || intval >= ZIO_CHECKSUM_LEGACY_FUNCTIONS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_nlevels", &nlevels);
if (ret != 0 || nlevels > DN_MAX_LEVELS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_blksz", &blksz);
if (ret != 0 || blksz < SPA_MINBLOCKSIZE)
return (SET_ERROR(EINVAL));
else if (blksz > spa_maxblocksize(tx->tx_pool->dp_spa))
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_indblkshift", &ibs);
if (ret != 0 || ibs < DN_MIN_INDBLKSHIFT || ibs > DN_MAX_INDBLKSHIFT)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_nblkptr", &nblkptr);
if (ret != 0 || nblkptr != DN_MAX_NBLKPTR)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_maxblkid", &maxblkid);
if (ret != 0)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, "portable_mac", &buf, &len);
if (ret != 0 || len != ZIO_OBJSET_MAC_LEN)
return (SET_ERROR(EINVAL));
ret = dmu_objset_from_ds(ds, &os);
if (ret != 0)
return (ret);
mdn = DMU_META_DNODE(os);
/*
* If we already created the objset, make sure its unchangeable
* properties match the ones received in the nvlist.
*/
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) &&
(mdn->dn_nlevels != nlevels || mdn->dn_datablksz != blksz ||
mdn->dn_indblkshift != ibs || mdn->dn_nblkptr != nblkptr)) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
return (SET_ERROR(EINVAL));
}
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/*
* Check that the ivset guid of the fromds matches the one from the
* send stream. Older versions of the encryption code did not have
* an ivset guid on the from dataset and did not send one in the
* stream. For these streams we provide the
* zfs_disable_ivset_guid_check tunable to allow these datasets to
* be received with a generated ivset guid.
*/
if (fromds != NULL && !zfs_disable_ivset_guid_check) {
uint64_t from_ivset_guid = 0;
intval = 0;
(void) nvlist_lookup_uint64(nvl, "from_ivset_guid", &intval);
(void) zap_lookup(tx->tx_pool->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID,
sizeof (from_ivset_guid), 1, &from_ivset_guid);
if (intval == 0 || from_ivset_guid == 0)
return (SET_ERROR(ZFS_ERR_FROM_IVSET_GUID_MISSING));
if (intval != from_ivset_guid)
return (SET_ERROR(ZFS_ERR_FROM_IVSET_GUID_MISMATCH));
}
return (0);
}
static void
dsl_crypto_recv_raw_objset_sync(dsl_dataset_t *ds, dmu_objset_type_t ostype,
nvlist_t *nvl, dmu_tx_t *tx)
{
dsl_pool_t *dp = tx->tx_pool;
objset_t *os;
dnode_t *mdn;
zio_t *zio;
uint8_t *portable_mac;
uint_t len;
uint64_t compress, checksum, nlevels, blksz, ibs, maxblkid;
boolean_t newds = B_FALSE;
VERIFY0(dmu_objset_from_ds(ds, &os));
mdn = DMU_META_DNODE(os);
/*
* Fetch the values we need from the nvlist. "to_ivset_guid" must
* be set on the snapshot, which doesn't exist yet. The receive
* code will take care of this for us later.
*/
compress = fnvlist_lookup_uint64(nvl, "mdn_compress");
checksum = fnvlist_lookup_uint64(nvl, "mdn_checksum");
nlevels = fnvlist_lookup_uint64(nvl, "mdn_nlevels");
blksz = fnvlist_lookup_uint64(nvl, "mdn_blksz");
ibs = fnvlist_lookup_uint64(nvl, "mdn_indblkshift");
maxblkid = fnvlist_lookup_uint64(nvl, "mdn_maxblkid");
VERIFY0(nvlist_lookup_uint8_array(nvl, "portable_mac", &portable_mac,
&len));
/* if we haven't created an objset for the ds yet, do that now */
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
(void) dmu_objset_create_impl_dnstats(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), ostype, nlevels, blksz,
ibs, tx);
newds = B_TRUE;
}
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/*
* Set the portable MAC. The local MAC will always be zero since the
* incoming data will all be portable and user accounting will be
* deferred until the next mount. Afterwards, flag the os to be
* written out raw next time.
*/
arc_release(os->os_phys_buf, &os->os_phys_buf);
memcpy(os->os_phys->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memset(os->os_phys->os_local_mac, 0, ZIO_OBJSET_MAC_LEN);
os->os_flags &= ~OBJSET_FLAG_USERACCOUNTING_COMPLETE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
/* set metadnode compression and checksum */
mdn->dn_compress = compress;
mdn->dn_checksum = checksum;
rw_enter(&mdn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(mdn, maxblkid, tx, B_FALSE, B_TRUE);
rw_exit(&mdn->dn_struct_rwlock);
/*
* We can't normally dirty the dataset in syncing context unless
* we are creating a new dataset. In this case, we perform a
* pseudo txg sync here instead.
*/
if (newds) {
dsl_dataset_dirty(ds, tx);
} else {
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
dsl_dataset_sync_done(ds, tx);
}
}
int
dsl_crypto_recv_raw_key_check(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
{
int ret;
objset_t *mos = tx->tx_pool->dp_meta_objset;
uint8_t *buf = NULL;
uint_t len;
uint64_t intval, key_guid, version;
boolean_t is_passphrase = B_FALSE;
ASSERT(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT);
/*
* Read and check all the encryption values from the nvlist. We need
* all of the fields of a DSL Crypto Key, as well as a fully specified
* wrapping key.
*/
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE, &intval);
if (ret != 0 || intval <= ZIO_CRYPT_OFF)
return (SET_ERROR(EINVAL));
/*
* Flag a future crypto suite that we don't support differently, so
* we can return a more useful error to the user.
*/
if (intval >= ZIO_CRYPT_FUNCTIONS)
return (SET_ERROR(ZFS_ERR_CRYPTO_NOTSUP));
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_GUID, &intval);
if (ret != 0)
return (SET_ERROR(EINVAL));
/*
* If this is an incremental receive make sure the given key guid
* matches the one we already have.
*/
if (ds->ds_dir->dd_crypto_obj != 0) {
ret = zap_lookup(mos, ds->ds_dir->dd_crypto_obj,
DSL_CRYPTO_KEY_GUID, 8, 1, &key_guid);
if (ret != 0)
return (ret);
if (intval != key_guid)
return (SET_ERROR(EACCES));
}
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
&buf, &len);
if (ret != 0 || len != MASTER_KEY_MAX_LEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
&buf, &len);
if (ret != 0 || len != SHA512_HMAC_KEYLEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_IV, &buf, &len);
if (ret != 0 || len != WRAPPING_IV_LEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, &buf, &len);
if (ret != 0 || len != WRAPPING_MAC_LEN)
return (SET_ERROR(EINVAL));
/*
* We don't support receiving old on-disk formats. The version 0
* implementation protected several fields in an objset that were
* not always portable during a raw receive. As a result, we call
* the old version an on-disk errata #3.
*/
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_VERSION, &version);
if (ret != 0 || version != ZIO_CRYPT_KEY_CURRENT_VERSION)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
&intval);
if (ret != 0 || intval >= ZFS_KEYFORMAT_FORMATS ||
intval == ZFS_KEYFORMAT_NONE)
return (SET_ERROR(EINVAL));
is_passphrase = (intval == ZFS_KEYFORMAT_PASSPHRASE);
/*
* For raw receives we allow any number of pbkdf2 iters since there
* won't be a chance for the user to change them.
*/
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
&intval);
if (ret != 0 || (is_passphrase == (intval == 0)))
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
&intval);
if (ret != 0 || (is_passphrase == (intval == 0)))
return (SET_ERROR(EINVAL));
return (0);
}
void
dsl_crypto_recv_raw_key_sync(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
{
dsl_pool_t *dp = tx->tx_pool;
objset_t *mos = dp->dp_meta_objset;
dsl_dir_t *dd = ds->ds_dir;
uint_t len;
uint64_t rddobj, one = 1;
uint8_t *keydata, *hmac_keydata, *iv, *mac;
uint64_t crypt, key_guid, keyformat, iters, salt;
uint64_t version = ZIO_CRYPT_KEY_CURRENT_VERSION;
const char *keylocation = "prompt";
/* lookup the values we need to create the DSL Crypto Key */
crypt = fnvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE);
key_guid = fnvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_GUID);
keyformat = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT));
iters = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS));
salt = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
&keydata, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
&hmac_keydata, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_IV, &iv, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, &mac, &len));
/* if this is a new dataset, set up the DSL Crypto Key */
if (dd->dd_crypto_obj == 0) {
/* zapify the dsl dir so we can add the key object to it */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
/* create the DSL Crypto Key on disk and activate the feature */
dd->dd_crypto_obj = zap_create(mos,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
dd->dd_crypto_obj, DSL_CRYPTO_KEY_REFCOUNT,
sizeof (uint64_t), 1, &one, tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
dd->dd_crypto_obj, DSL_CRYPTO_KEY_VERSION,
sizeof (uint64_t), 1, &version, tx));
dsl_dataset_activate_feature(ds->ds_object,
SPA_FEATURE_ENCRYPTION, (void *)B_TRUE, tx);
ds->ds_feature[SPA_FEATURE_ENCRYPTION] = (void *)B_TRUE;
/* save the dd_crypto_obj on disk */
VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj, tx));
/*
* Set the keylocation to prompt by default. If keylocation
* has been provided via the properties, this will be overridden
* later.
*/
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1,
keylocation, tx);
rddobj = dd->dd_object;
} else {
VERIFY0(dsl_dir_get_encryption_root_ddobj(dd, &rddobj));
}
/* sync the key data to the ZAP object on disk */
dsl_crypto_key_sync_impl(mos, dd->dd_crypto_obj, crypt,
rddobj, key_guid, iv, mac, keydata, hmac_keydata, keyformat, salt,
iters, tx);
}
static int
dsl_crypto_recv_key_check(void *arg, dmu_tx_t *tx)
{
int ret;
dsl_crypto_recv_key_arg_t *dcrka = arg;
dsl_dataset_t *ds = NULL, *fromds = NULL;
ret = dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_dsobj,
FTAG, &ds);
if (ret != 0)
goto out;
if (dcrka->dcrka_fromobj != 0) {
ret = dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_fromobj,
FTAG, &fromds);
if (ret != 0)
goto out;
}
ret = dsl_crypto_recv_raw_objset_check(ds, fromds,
dcrka->dcrka_ostype, dcrka->dcrka_nvl, tx);
if (ret != 0)
goto out;
/*
* We run this check even if we won't be doing this part of
* the receive now so that we don't make the user wait until
* the receive finishes to fail.
*/
ret = dsl_crypto_recv_raw_key_check(ds, dcrka->dcrka_nvl, tx);
if (ret != 0)
goto out;
out:
if (ds != NULL)
dsl_dataset_rele(ds, FTAG);
if (fromds != NULL)
dsl_dataset_rele(fromds, FTAG);
return (ret);
}
static void
dsl_crypto_recv_key_sync(void *arg, dmu_tx_t *tx)
{
dsl_crypto_recv_key_arg_t *dcrka = arg;
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_dsobj,
FTAG, &ds));
dsl_crypto_recv_raw_objset_sync(ds, dcrka->dcrka_ostype,
dcrka->dcrka_nvl, tx);
if (dcrka->dcrka_do_key)
dsl_crypto_recv_raw_key_sync(ds, dcrka->dcrka_nvl, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* This function is used to sync an nvlist representing a DSL Crypto Key and
* the associated encryption parameters. The key will be written exactly as is
* without wrapping it.
*/
int
dsl_crypto_recv_raw(const char *poolname, uint64_t dsobj, uint64_t fromobj,
dmu_objset_type_t ostype, nvlist_t *nvl, boolean_t do_key)
{
dsl_crypto_recv_key_arg_t dcrka;
dcrka.dcrka_dsobj = dsobj;
dcrka.dcrka_fromobj = fromobj;
dcrka.dcrka_ostype = ostype;
dcrka.dcrka_nvl = nvl;
dcrka.dcrka_do_key = do_key;
return (dsl_sync_task(poolname, dsl_crypto_recv_key_check,
dsl_crypto_recv_key_sync, &dcrka, 1, ZFS_SPACE_CHECK_NORMAL));
}
int
dsl_crypto_populate_key_nvlist(objset_t *os, uint64_t from_ivset_guid,
nvlist_t **nvl_out)
{
int ret;
dsl_dataset_t *ds = os->os_dsl_dataset;
dnode_t *mdn;
uint64_t rddobj;
nvlist_t *nvl = NULL;
uint64_t dckobj = ds->ds_dir->dd_crypto_obj;
dsl_dir_t *rdd = NULL;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
uint64_t crypt = 0, key_guid = 0, format = 0;
uint64_t iters = 0, salt = 0, version = 0;
uint64_t to_ivset_guid = 0;
uint8_t raw_keydata[MASTER_KEY_MAX_LEN];
uint8_t raw_hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
ASSERT(dckobj != 0);
mdn = DMU_META_DNODE(os);
nvl = fnvlist_alloc();
/* lookup values from the DSL Crypto Key */
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1, &key_guid);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, raw_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, raw_hmac_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac);
if (ret != 0)
goto error;
/* see zfs_disable_ivset_guid_check tunable for errata info */
ret = zap_lookup(mos, ds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&to_ivset_guid);
if (ret != 0)
ASSERT3U(dp->dp_spa->spa_errata, !=, 0);
/*
* We don't support raw sends of legacy on-disk formats. See the
* comment in dsl_crypto_recv_key_check() for details.
*/
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_VERSION, 8, 1, &version);
if (ret != 0 || version != ZIO_CRYPT_KEY_CURRENT_VERSION) {
dp->dp_spa->spa_errata = ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
ret = SET_ERROR(ENOTSUP);
goto error;
}
/*
* Lookup wrapping key properties. An early version of the code did
* not correctly add these values to the wrapping key or the DSL
* Crypto Key on disk for non-encryption roots, so to be safe we
* always take the slightly circuitous route of looking it up from
* the encryption root's key.
*/
ret = dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj);
if (ret != 0)
goto error;
dsl_pool_config_enter(dp, FTAG);
ret = dsl_dir_hold_obj(dp, rddobj, NULL, FTAG, &rdd);
if (ret != 0)
goto error_unlock;
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &format);
if (ret != 0)
goto error_unlock;
if (format == ZFS_KEYFORMAT_PASSPHRASE) {
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
if (ret != 0)
goto error_unlock;
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
if (ret != 0)
goto error_unlock;
}
dsl_dir_rele(rdd, FTAG);
dsl_pool_config_exit(dp, FTAG);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE, crypt);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_GUID, key_guid);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_VERSION, version);
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
raw_keydata, MASTER_KEY_MAX_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
raw_hmac_keydata, SHA512_HMAC_KEYLEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_IV, iv,
WRAPPING_IV_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, mac,
WRAPPING_MAC_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, "portable_mac",
os->os_phys->os_portable_mac, ZIO_OBJSET_MAC_LEN));
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_KEYFORMAT), format);
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), iters);
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt);
fnvlist_add_uint64(nvl, "mdn_checksum", mdn->dn_checksum);
fnvlist_add_uint64(nvl, "mdn_compress", mdn->dn_compress);
fnvlist_add_uint64(nvl, "mdn_nlevels", mdn->dn_nlevels);
fnvlist_add_uint64(nvl, "mdn_blksz", mdn->dn_datablksz);
fnvlist_add_uint64(nvl, "mdn_indblkshift", mdn->dn_indblkshift);
fnvlist_add_uint64(nvl, "mdn_nblkptr", mdn->dn_nblkptr);
fnvlist_add_uint64(nvl, "mdn_maxblkid", mdn->dn_maxblkid);
fnvlist_add_uint64(nvl, "to_ivset_guid", to_ivset_guid);
fnvlist_add_uint64(nvl, "from_ivset_guid", from_ivset_guid);
*nvl_out = nvl;
return (0);
error_unlock:
dsl_pool_config_exit(dp, FTAG);
error:
if (rdd != NULL)
dsl_dir_rele(rdd, FTAG);
nvlist_free(nvl);
*nvl_out = NULL;
return (ret);
}
uint64_t
dsl_crypto_key_create_sync(uint64_t crypt, dsl_wrapping_key_t *wkey,
dmu_tx_t *tx)
{
dsl_crypto_key_t dck;
uint64_t version = ZIO_CRYPT_KEY_CURRENT_VERSION;
uint64_t one = 1ULL;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(crypt, >, ZIO_CRYPT_OFF);
/* create the DSL Crypto Key ZAP object */
dck.dck_obj = zap_create(tx->tx_pool->dp_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
/* fill in the key (on the stack) and sync it to disk */
dck.dck_wkey = wkey;
VERIFY0(zio_crypt_key_init(crypt, &dck.dck_key));
dsl_crypto_key_sync(&dck, tx);
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset, dck.dck_obj,
DSL_CRYPTO_KEY_REFCOUNT, sizeof (uint64_t), 1, &one, tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset, dck.dck_obj,
DSL_CRYPTO_KEY_VERSION, sizeof (uint64_t), 1, &version, tx));
zio_crypt_key_destroy(&dck.dck_key);
memset(&dck.dck_key, 0, sizeof (zio_crypt_key_t));
return (dck.dck_obj);
}
uint64_t
dsl_crypto_key_clone_sync(dsl_dir_t *origindd, dmu_tx_t *tx)
{
objset_t *mos = tx->tx_pool->dp_meta_objset;
ASSERT(dmu_tx_is_syncing(tx));
VERIFY0(zap_increment(mos, origindd->dd_crypto_obj,
DSL_CRYPTO_KEY_REFCOUNT, 1, tx));
return (origindd->dd_crypto_obj);
}
void
dsl_crypto_key_destroy_sync(uint64_t dckobj, dmu_tx_t *tx)
{
objset_t *mos = tx->tx_pool->dp_meta_objset;
uint64_t refcnt;
/* Decrement the refcount, destroy if this is the last reference */
VERIFY0(zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_REFCOUNT,
sizeof (uint64_t), 1, &refcnt));
if (refcnt != 1) {
VERIFY0(zap_increment(mos, dckobj, DSL_CRYPTO_KEY_REFCOUNT,
-1, tx));
} else {
VERIFY0(zap_destroy(mos, dckobj, tx));
}
}
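/*
 * Lifecycle of the on-disk DSL_CRYPTO_KEY_REFCOUNT managed by the three
 * functions above, shown as a hypothetical sequence for a dataset that
 * is cloned once:
 *
 *	obj = dsl_crypto_key_create_sync(crypt, wkey, tx);  refcount == 1
 *	(void) dsl_crypto_key_clone_sync(origindd, tx);     refcount == 2
 *	dsl_crypto_key_destroy_sync(obj, tx);               refcount == 1
 *	dsl_crypto_key_destroy_sync(obj, tx);               ZAP destroyed
 */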
void
dsl_dataset_crypt_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
uint64_t intval;
dsl_dir_t *dd = ds->ds_dir;
dsl_dir_t *enc_root;
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dd->dd_crypto_obj == 0)
return;
intval = dsl_dataset_get_keystatus(dd);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEYSTATUS, intval);
if (dsl_dir_get_crypt(dd, &intval) == 0)
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_ENCRYPTION, intval);
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_GUID, 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEY_GUID, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEYFORMAT, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_PBKDF2_SALT, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_PBKDF2_ITERS, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_IVSET_GUID, intval);
}
if (dsl_dir_get_encryption_root_ddobj(dd, &intval) == 0) {
if (dsl_dir_hold_obj(dd->dd_pool, intval, NULL, FTAG,
&enc_root) == 0) {
dsl_dir_name(enc_root, buf);
dsl_dir_rele(enc_root, FTAG);
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_ENCRYPTION_ROOT, buf);
}
}
}
int
spa_crypt_get_salt(spa_t *spa, uint64_t dsobj, uint8_t *salt)
{
int ret;
dsl_crypto_key_t *dck = NULL;
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_crypt_key_get_salt(&dck->dck_key, salt);
if (ret != 0)
goto error;
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (ret);
}
/*
* Objset blocks are a special case for MAC generation. These blocks have 2
* 256-bit MACs which are embedded within the block itself, rather than a
* single 128 bit MAC. As a result, this function handles encoding and decoding
* the MACs on its own, unlike other functions in this file.
*/
int
spa_do_crypt_objset_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
abd_t *abd, uint_t datalen, boolean_t byteswap)
{
int ret;
dsl_crypto_key_t *dck = NULL;
void *buf = abd_borrow_buf_copy(abd, datalen);
objset_phys_t *osp = buf;
uint8_t portable_mac[ZIO_OBJSET_MAC_LEN];
uint8_t local_mac[ZIO_OBJSET_MAC_LEN];
const uint8_t zeroed_mac[ZIO_OBJSET_MAC_LEN] = {0};
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
/* calculate both HMACs */
ret = zio_crypt_do_objset_hmacs(&dck->dck_key, buf, datalen,
byteswap, portable_mac, local_mac);
if (ret != 0)
goto error;
spa_keystore_dsl_key_rele(spa, dck, FTAG);
/* if we are generating, encode the HMACs in the objset_phys_t */
if (generate) {
memcpy(osp->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memcpy(osp->os_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
abd_return_buf_copy(abd, buf, datalen);
return (0);
}
if (memcmp(portable_mac, osp->os_portable_mac,
ZIO_OBJSET_MAC_LEN) != 0 ||
memcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
/*
* If the MAC is zeroed out, we failed to decrypt it.
* This should only arise, at least on Linux,
* if we hit edge case handling for useraccounting, since we
* shouldn't get here without bailing out on error earlier
* otherwise.
*
* So if we're in that case, we can just fall through; the special
* case that notices the zeroed MAC will handle it elsewhere, since
* we can simply regenerate it.
*/
if (memcmp(local_mac, zeroed_mac, ZIO_OBJSET_MAC_LEN) != 0) {
abd_return_buf(abd, buf, datalen);
return (SET_ERROR(ECKSUM));
}
}
abd_return_buf(abd, buf, datalen);
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
abd_return_buf(abd, buf, datalen);
return (ret);
}
int
spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd,
uint_t datalen, uint8_t *mac)
{
int ret;
dsl_crypto_key_t *dck = NULL;
uint8_t *buf = abd_borrow_buf_copy(abd, datalen);
uint8_t digestbuf[ZIO_DATA_MAC_LEN];
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
/* perform the hmac */
ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen,
digestbuf, ZIO_DATA_MAC_LEN);
if (ret != 0)
goto error;
abd_return_buf(abd, buf, datalen);
spa_keystore_dsl_key_rele(spa, dck, FTAG);
/*
* Truncate and fill in mac buffer if we were asked to generate a MAC.
* Otherwise verify that the MAC matched what we expected.
*/
if (generate) {
memcpy(mac, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (memcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
abd_return_buf(abd, buf, datalen);
return (ret);
}
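/*
 * Generate / verify usage of the MAC helper above (hypothetical caller;
 * abd, len and dsobj come from the surrounding I/O path):
 *
 *	uint8_t mac[ZIO_DATA_MAC_LEN];
 *
 *	// write path: compute the MAC so it can be stored in the blkptr
 *	VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, abd, len, mac));
 *
 *	// read path: recompute and compare; returns ECKSUM on mismatch
 *	ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, len, mac);
 */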
/*
* This function serves as a multiplexer for encryption and decryption of
* all blocks (except the L2ARC). For encryption, it will populate the IV,
* salt, MAC, and cabd (the ciphertext). On decryption it will simply use
* these fields to populate pabd (the plaintext).
*/
int
spa_do_crypt_abd(boolean_t encrypt, spa_t *spa, const zbookmark_phys_t *zb,
dmu_object_type_t ot, boolean_t dedup, boolean_t bswap, uint8_t *salt,
uint8_t *iv, uint8_t *mac, uint_t datalen, abd_t *pabd, abd_t *cabd,
boolean_t *no_crypt)
{
int ret;
dsl_crypto_key_t *dck = NULL;
uint8_t *plainbuf = NULL, *cipherbuf = NULL;
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, zb->zb_objset, FTAG, &dck);
if (ret != 0) {
ret = SET_ERROR(EACCES);
return (ret);
}
if (encrypt) {
plainbuf = abd_borrow_buf_copy(pabd, datalen);
cipherbuf = abd_borrow_buf(cabd, datalen);
} else {
plainbuf = abd_borrow_buf(pabd, datalen);
cipherbuf = abd_borrow_buf_copy(cabd, datalen);
}
/*
* Both encryption and decryption functions need a salt for key
* generation and an IV. When encrypting a non-dedup block, we
* generate the salt and IV randomly to be stored by the caller. Dedup
* blocks perform a (more expensive) HMAC of the plaintext to obtain
* the salt and the IV. ZIL blocks have their salt and IV generated
* at allocation time in zio_alloc_zil(). On decryption, we simply use
* the provided values.
*/
if (encrypt && ot != DMU_OT_INTENT_LOG && !dedup) {
ret = zio_crypt_key_get_salt(&dck->dck_key, salt);
if (ret != 0)
goto error;
ret = zio_crypt_generate_iv(iv);
if (ret != 0)
goto error;
} else if (encrypt && dedup) {
ret = zio_crypt_generate_iv_salt_dedup(&dck->dck_key,
plainbuf, datalen, iv, salt);
if (ret != 0)
goto error;
}
/* call lower level function to perform encryption / decryption */
ret = zio_do_crypt_data(encrypt, &dck->dck_key, ot, bswap, salt, iv,
mac, datalen, plainbuf, cipherbuf, no_crypt);
/*
* Handle injected decryption faults. Unfortunately, we cannot inject
* faults for dnode blocks because we might trigger the panic in
* dbuf_prepare_encrypted_dnode_leaf(), which exists because syncing
* context is not prepared to handle malicious decryption failures.
*/
if (zio_injection_enabled && !encrypt && ot != DMU_OT_DNODE && ret == 0)
ret = zio_handle_decrypt_injection(spa, zb, ot, ECKSUM);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {
abd_return_buf_copy(pabd, plainbuf, datalen);
abd_return_buf(cabd, cipherbuf, datalen);
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (0);
error:
if (encrypt) {
/* zero out any state we might have changed while encrypting */
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
memset(mac, 0, ZIO_DATA_MAC_LEN);
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {
abd_return_buf_copy(pabd, plainbuf, datalen);
abd_return_buf(cabd, cipherbuf, datalen);
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (ret);
}
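/*
 * Where the salt and IV handled above come from, by block class:
 *
 *	normal blocks:  random salt and IV generated here at encrypt time
 *	dedup blocks:   salt and IV derived from an HMAC of the plaintext,
 *	                so identical plaintext yields identical ciphertext
 *	ZIL blocks:     salt and IV preallocated in zio_alloc_zil()
 *	decryption:     always uses the caller-provided values
 */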
ZFS_MODULE_PARAM(zfs, zfs_, disable_ivset_guid_check, INT, ZMOD_RW,
"Set to allow raw receives without IVset guids");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
index 47c234f76c40..ac30a370813f 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
@@ -1,1116 +1,1113 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
/*
* Deadlist concurrency:
*
* Deadlists can only be modified from the syncing thread.
*
* Except for dsl_deadlist_insert(), a deadlist can only be modified
* with the dp_config_rwlock held with RW_WRITER.
*
* The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
* be called concurrently, from open context, with the dp_config_rwlock held
* with RW_READER.
*
* Therefore, we only need to provide locking between dsl_deadlist_insert() and
* the accessors, protecting:
* dl_phys->dl_used,comp,uncomp
* and protecting the dl_tree from being loaded.
* The locking is provided by dl_lock. Note that locking on the bpobj_t
* provides its own locking, and dl_oldfmt is immutable.
*/
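To make the accessor rule concrete, here is a minimal sketch (not part of the patch; example_read_space() is a hypothetical helper mirroring dsl_deadlist_space() later in this file):

/*
 * Hypothetical sketch: read the cached space totals under dl_lock,
 * per the concurrency rules described above.
 */
static void
example_read_space(dsl_deadlist_t *dl, uint64_t *usedp, uint64_t *compp)
{
	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	mutex_exit(&dl->dl_lock);
}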
/*
* Livelist Overview
* ================
*
* Livelists use the same 'deadlist_t' struct as deadlists and are also used
* to track blkptrs over the lifetime of a dataset. Livelists however, belong
* to clones and track the blkptrs that are clone-specific (were born after
* the clone's creation). The exception is embedded block pointers which are
* not included in livelists because they do not need to be freed.
*
* When it comes time to delete the clone, the livelist provides a quick
* reference as to what needs to be freed. For this reason, livelists also track
* when clone-specific blkptrs are freed before deletion to prevent double
* frees. Each blkptr in a livelist is marked as a FREE or an ALLOC and the
* deletion algorithm iterates backwards over the livelist, matching
* FREE/ALLOC pairs and then freeing those ALLOCs which remain. Livelists
* are also updated when blkptrs are remapped: the old version
* of the blkptr is cancelled out with a FREE and the new version is tracked
* with an ALLOC.
*
* To bound the amount of memory required for deletion, livelists over a
* certain size are spread over multiple entries. Entries are grouped by
* birth txg so we can be sure the ALLOC/FREE pair for a given blkptr will
* be in the same entry. This allows us to delete livelists incrementally
* over multiple syncs, one entry at a time.
*
* During the lifetime of the clone, livelists can get extremely large.
* Their size is managed by periodic condensing (preemptively cancelling out
* FREE/ALLOC pairs). Livelists are disabled when a clone is promoted or when
* the shared space between the clone and its origin is so small that it
* doesn't make sense to use livelists anymore.
*/
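A short worked trace of the matching rule described above (illustrative block pointers; the deletion walk sees FREEs before their matching ALLOCs):

	seen FREE(b)  -> remember b with refcnt = 1
	seen ALLOC(b) -> cancels the FREE; b is dropped from the set
	seen ALLOC(c) -> no FREE recorded for c, so c must still be freed

This is exactly the refcount scheme implemented by dsl_livelist_iterate() below.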
/*
* The threshold sublist size at which we create a new sub-livelist for the
* next txg. However, since blkptrs of the same transaction group must be in
* the same sub-list, the actual sublist size may exceed this. When picking the
* size we had to balance the fact that larger sublists mean fewer sublists
* (decreasing the cost of insertion) against the consideration that sublists
* will be loaded into memory and shouldn't take up an inordinate amount of
* space. We settled on ~500000 entries, corresponding to roughly 128M.
*/
uint64_t zfs_livelist_max_entries = 500000;
/*
* We can approximate how much of a performance gain a livelist will give us
* based on the percentage of blocks shared between the clone and its origin.
* 0 percent shared means that the clone has completely diverged and that the
* old method is maximally effective: every read from the block tree will
* result in lots of frees. Livelists give us gains when they track blocks
* scattered across the tree, when one read in the old method might only
* result in a few frees. Once the clone has been overwritten enough,
* writes are no longer sparse and we'll no longer get much of a benefit from
* tracking them with a livelist. We chose a lower limit of 75 percent shared
* (25 percent overwritten). This means that 1/4 of all block pointers will be
* freed (e.g. each read frees 256, out of a max of 1024) so we expect livelists
* to make deletion 4x faster. Once the amount of shared space drops below this
* threshold, the clone will revert to the old deletion method.
*/
int zfs_livelist_min_percent_shared = 75;
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
const dsl_deadlist_entry_t *dle1 = arg1;
const dsl_deadlist_entry_t *dle2 = arg2;
return (TREE_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static int
dsl_deadlist_cache_compare(const void *arg1, const void *arg2)
{
const dsl_deadlist_cache_entry_t *dlce1 = arg1;
const dsl_deadlist_cache_entry_t *dlce2 = arg2;
return (TREE_CMP(dlce1->dlce_mintxg, dlce2->dlce_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
zap_cursor_t zc;
zap_attribute_t za;
int error;
ASSERT(MUTEX_HELD(&dl->dl_lock));
ASSERT(!dl->dl_oldfmt);
if (dl->dl_havecache) {
/*
* After loading the tree, the caller may modify the tree,
* e.g. to add or remove nodes, or to make a node no longer
* refer to the empty_bpobj. These changes would make the
* dl_cache incorrect. Therefore we discard the cache here,
* so that it can't become incorrect.
*/
dsl_deadlist_cache_entry_t *dlce;
void *cookie = NULL;
while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
!= NULL) {
kmem_free(dlce, sizeof (*dlce));
}
avl_destroy(&dl->dl_cache);
dl->dl_havecache = B_FALSE;
}
if (dl->dl_havetree)
return;
avl_create(&dl->dl_tree, dsl_deadlist_compare,
sizeof (dsl_deadlist_entry_t),
offsetof(dsl_deadlist_entry_t, dle_node));
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);
/*
* Prefetch all the bpobj's so that we do that i/o
* in parallel. Then open them all in a second pass.
*/
dle->dle_bpobj.bpo_object = za.za_first_integer;
dmu_prefetch(dl->dl_os, dle->dle_bpobj.bpo_object,
0, 0, 0, ZIO_PRIORITY_SYNC_READ);
avl_add(&dl->dl_tree, dle);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
for (dsl_deadlist_entry_t *dle = avl_first(&dl->dl_tree);
dle != NULL; dle = AVL_NEXT(&dl->dl_tree, dle)) {
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os,
dle->dle_bpobj.bpo_object));
}
dl->dl_havetree = B_TRUE;
}
/*
* Load only the non-empty bpobj's into the dl_cache. The cache is an analog
* of the dl_tree, but contains only non-empty_bpobj nodes from the ZAP. It
* is used only for gathering space statistics. The dl_cache has two
* advantages over the dl_tree:
*
* 1. Loading the dl_cache is ~5x faster than loading the dl_tree (if it's
* mostly empty_bpobj's), because we avoid the CPU overhead of opening the
* empty_bpobj many times and of querying its (zero) space stats each time.
*
* 2. The dl_cache uses less memory than the dl_tree. We only need to load
* the dl_tree of snapshots when deleting a snapshot, after which we free the
* dl_tree with dsl_deadlist_discard_tree().
*/
static void
dsl_deadlist_load_cache(dsl_deadlist_t *dl)
{
zap_cursor_t zc;
zap_attribute_t za;
int error;
ASSERT(MUTEX_HELD(&dl->dl_lock));
ASSERT(!dl->dl_oldfmt);
if (dl->dl_havecache)
return;
uint64_t empty_bpobj = dmu_objset_pool(dl->dl_os)->dp_empty_bpobj;
avl_create(&dl->dl_cache, dsl_deadlist_cache_compare,
sizeof (dsl_deadlist_cache_entry_t),
offsetof(dsl_deadlist_cache_entry_t, dlce_node));
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
if (za.za_first_integer == empty_bpobj)
continue;
dsl_deadlist_cache_entry_t *dlce =
kmem_zalloc(sizeof (*dlce), KM_SLEEP);
dlce->dlce_mintxg = zfs_strtonum(za.za_name, NULL);
/*
* Prefetch all the bpobj's so that we do that i/o
* in parallel. Then open them all in a second pass.
*/
dlce->dlce_bpobj = za.za_first_integer;
dmu_prefetch(dl->dl_os, dlce->dlce_bpobj,
0, 0, 0, ZIO_PRIORITY_SYNC_READ);
avl_add(&dl->dl_cache, dlce);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
for (dsl_deadlist_cache_entry_t *dlce = avl_first(&dl->dl_cache);
dlce != NULL; dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
bpobj_t bpo;
VERIFY0(bpobj_open(&bpo, dl->dl_os, dlce->dlce_bpobj));
VERIFY0(bpobj_space(&bpo,
&dlce->dlce_bytes, &dlce->dlce_comp, &dlce->dlce_uncomp));
bpobj_close(&bpo);
}
dl->dl_havecache = B_TRUE;
}
/*
* Discard the tree to save memory.
*/
void
dsl_deadlist_discard_tree(dsl_deadlist_t *dl)
{
mutex_enter(&dl->dl_lock);
if (!dl->dl_havetree) {
mutex_exit(&dl->dl_lock);
return;
}
dsl_deadlist_entry_t *dle;
void *cookie = NULL;
while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie)) != NULL) {
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
}
avl_destroy(&dl->dl_tree);
dl->dl_havetree = B_FALSE;
mutex_exit(&dl->dl_lock);
}
void
dsl_deadlist_iterate(dsl_deadlist_t *dl, deadlist_iter_t func, void *args)
{
dsl_deadlist_entry_t *dle;
ASSERT(dsl_deadlist_is_open(dl));
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
mutex_exit(&dl->dl_lock);
for (dle = avl_first(&dl->dl_tree); dle != NULL;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
if (func(args, dle) != 0)
break;
}
}
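A hedged usage sketch (count_cb and the call site are hypothetical; the callback shape matches how func(args, dle) is invoked above, and a nonzero return stops the walk):

static int
count_cb(void *arg, dsl_deadlist_entry_t *dle)
{
	(void) dle;
	(*(uint64_t *)arg)++;
	return (0);
}

	uint64_t n = 0;
	dsl_deadlist_iterate(dl, count_cb, &n);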
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
ASSERT(!dsl_deadlist_is_open(dl));
mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
dl->dl_os = os;
dl->dl_object = object;
VERIFY0(dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
dmu_object_info_from_db(dl->dl_dbuf, &doi);
if (doi.doi_type == DMU_OT_BPOBJ) {
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_oldfmt = B_TRUE;
VERIFY0(bpobj_open(&dl->dl_bpobj, os, object));
return;
}
dl->dl_oldfmt = B_FALSE;
dl->dl_phys = dl->dl_dbuf->db_data;
dl->dl_havetree = B_FALSE;
dl->dl_havecache = B_FALSE;
}
boolean_t
dsl_deadlist_is_open(dsl_deadlist_t *dl)
{
return (dl->dl_os != NULL);
}
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
ASSERT(dsl_deadlist_is_open(dl));
mutex_destroy(&dl->dl_lock);
if (dl->dl_oldfmt) {
dl->dl_oldfmt = B_FALSE;
bpobj_close(&dl->dl_bpobj);
dl->dl_os = NULL;
dl->dl_object = 0;
return;
}
if (dl->dl_havetree) {
dsl_deadlist_entry_t *dle;
void *cookie = NULL;
while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
!= NULL) {
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
}
avl_destroy(&dl->dl_tree);
}
if (dl->dl_havecache) {
dsl_deadlist_cache_entry_t *dlce;
void *cookie = NULL;
while ((dlce = avl_destroy_nodes(&dl->dl_cache, &cookie))
!= NULL) {
kmem_free(dlce, sizeof (*dlce));
}
avl_destroy(&dl->dl_cache);
}
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_phys = NULL;
dl->dl_os = NULL;
dl->dl_object = 0;
}
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
dmu_object_info_t doi;
zap_cursor_t zc;
zap_attribute_t za;
int error;
VERIFY0(dmu_object_info(os, dlobj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_free(os, dlobj, tx);
return;
}
for (zap_cursor_init(&zc, os, dlobj);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t obj = za.za_first_integer;
if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
bpobj_decr_empty(os, tx);
else
bpobj_free(os, obj, tx);
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
VERIFY0(dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object ==
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
bpobj_enqueue(&dle->dle_bpobj, bp, bp_freed, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
uint64_t obj, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object !=
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
} else {
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY0(zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
}
/*
* Prefetch metadata required for dle_enqueue_subobj().
*/
static void
dle_prefetch_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
uint64_t obj)
{
if (dle->dle_bpobj.bpo_object !=
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj)
bpobj_prefetch_subobj(&dle->dle_bpobj, obj);
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
if (dl->dl_oldfmt) {
bpobj_enqueue(&dl->dl_bpobj, bp, bp_freed, tx);
return;
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
int sign = bp_freed ? -1 : +1;
dl->dl_phys->dl_used +=
sign * bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp);
dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp);
dle_tofind.dle_mintxg = bp->blk_birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
else
dle = AVL_PREV(&dl->dl_tree, dle);
if (dle == NULL) {
zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
bp, (longlong_t)bp->blk_birth);
dle = avl_first(&dl->dl_tree);
}
ASSERT3P(dle, !=, NULL);
dle_enqueue(dl, dle, bp, bp_freed, tx);
mutex_exit(&dl->dl_lock);
}
int
dsl_deadlist_insert_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, B_FALSE, tx);
return (0);
}
int
dsl_deadlist_insert_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, B_TRUE, tx);
return (0);
}
/*
* Insert new key in deadlist, which must be > all current entries.
* mintxg is not inclusive.
*/
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
uint64_t obj;
dsl_deadlist_entry_t *dle;
if (dl->dl_oldfmt)
return;
dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = mintxg;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
avl_add(&dl->dl_tree, dle);
VERIFY0(zap_add_int_key(dl->dl_os, dl->dl_object,
mintxg, obj, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Remove this key, merging its entries into the previous key.
*/
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle, *dle_prev;
if (dl->dl_oldfmt)
return;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
ASSERT3P(dle, !=, NULL);
dle_prev = AVL_PREV(&dl->dl_tree, dle);
ASSERT3P(dle_prev, !=, NULL);
dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Remove a deadlist entry and all of its contents by removing the entry from
* the deadlist's avl tree, freeing the entry's bpobj and adjusting the
* deadlist's space accounting accordingly.
*/
void
dsl_deadlist_remove_entry(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
uint64_t used, comp, uncomp;
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
objset_t *os = dl->dl_os;
if (dl->dl_oldfmt)
return;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
VERIFY3P(dle, !=, NULL);
avl_remove(&dl->dl_tree, dle);
VERIFY0(zap_remove_int(os, dl->dl_object, mintxg, tx));
VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj) {
bpobj_decr_empty(os, tx);
} else {
bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
}
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
mutex_exit(&dl->dl_lock);
}
/*
* Clear out the contents of a deadlist_entry by freeing its bpobj,
* replacing it with an empty bpobj and adjusting the deadlist's
* space accounting
*/
void
dsl_deadlist_clear_entry(dsl_deadlist_entry_t *dle, dsl_deadlist_t *dl,
dmu_tx_t *tx)
{
uint64_t new_obj, used, comp, uncomp;
objset_t *os = dl->dl_os;
mutex_enter(&dl->dl_lock);
VERIFY0(zap_remove_int(os, dl->dl_object, dle->dle_mintxg, tx));
VERIFY0(bpobj_space(&dle->dle_bpobj, &used, &comp, &uncomp));
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
if (dle->dle_bpobj.bpo_object == dmu_objset_pool(os)->dp_empty_bpobj)
bpobj_decr_empty(os, tx);
else
bpobj_free(os, dle->dle_bpobj.bpo_object, tx);
bpobj_close(&dle->dle_bpobj);
new_obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dle->dle_bpobj, os, new_obj));
VERIFY0(zap_add_int_key(os, dl->dl_object, dle->dle_mintxg,
new_obj, tx));
ASSERT(bpobj_is_empty(&dle->dle_bpobj));
mutex_exit(&dl->dl_lock);
}
/*
* Return the first entry in deadlist's avl tree
*/
dsl_deadlist_entry_t *
dsl_deadlist_first(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle = avl_first(&dl->dl_tree);
mutex_exit(&dl->dl_lock);
return (dle);
}
/*
* Return the last entry in deadlist's avl tree
*/
dsl_deadlist_entry_t *
dsl_deadlist_last(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle = avl_last(&dl->dl_tree);
mutex_exit(&dl->dl_lock);
return (dle);
}
/*
* Walk ds's snapshots to regenerate the ZAP & AVL.
*/
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
uint64_t mrs_obj, dmu_tx_t *tx)
{
dsl_deadlist_t dl = { 0 };
dsl_pool_t *dp = dmu_objset_pool(os);
dsl_deadlist_open(&dl, os, dlobj);
if (dl.dl_oldfmt) {
dsl_deadlist_close(&dl);
return;
}
while (mrs_obj != 0) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
dsl_deadlist_add_key(&dl,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
uint64_t mrs_obj, dmu_tx_t *tx)
{
dsl_deadlist_entry_t *dle;
uint64_t newobj;
newobj = dsl_deadlist_alloc(dl->dl_os, tx);
if (dl->dl_oldfmt) {
dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
return (newobj);
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
uint64_t obj;
if (dle->dle_mintxg >= maxtxg)
break;
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(zap_add_int_key(dl->dl_os, newobj,
dle->dle_mintxg, obj, tx));
}
mutex_exit(&dl->dl_lock);
return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
ASSERT(dsl_deadlist_is_open(dl));
if (dl->dl_oldfmt) {
VERIFY0(bpobj_space(&dl->dl_bpobj,
usedp, compp, uncompp));
return;
}
mutex_enter(&dl->dl_lock);
*usedp = dl->dl_phys->dl_used;
*compp = dl->dl_phys->dl_comp;
*uncompp = dl->dl_phys->dl_uncomp;
mutex_exit(&dl->dl_lock);
}
/*
* Return space used in the range (mintxg, maxtxg].
* Includes maxtxg, does not include mintxg.
* mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
* UINT64_MAX).
*/
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
dsl_deadlist_cache_entry_t *dlce;
dsl_deadlist_cache_entry_t dlce_tofind;
avl_index_t where;
if (dl->dl_oldfmt) {
VERIFY0(bpobj_space_range(&dl->dl_bpobj,
mintxg, maxtxg, usedp, compp, uncompp));
return;
}
*usedp = *compp = *uncompp = 0;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_cache(dl);
dlce_tofind.dlce_mintxg = mintxg;
dlce = avl_find(&dl->dl_cache, &dlce_tofind, &where);
/*
* If this mintxg doesn't exist, it may be an empty_bpobj which
* is omitted from the sparse tree. Start at the next non-empty
* entry.
*/
if (dlce == NULL)
dlce = avl_nearest(&dl->dl_cache, where, AVL_AFTER);
for (; dlce && dlce->dlce_mintxg < maxtxg;
dlce = AVL_NEXT(&dl->dl_cache, dlce)) {
*usedp += dlce->dlce_bytes;
*compp += dlce->dlce_comp;
*uncompp += dlce->dlce_uncomp;
}
mutex_exit(&dl->dl_lock);
}
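A hypothetical call, following the range semantics documented above (txg1 and txg2 are illustrative and must be deadlist keys unless txg2 is UINT64_MAX):

	uint64_t used, comp, uncomp;
	dsl_deadlist_space_range(dl, txg1, txg2, &used, &comp, &uncomp);
	/* used/comp/uncomp now cover blocks charged to (txg1, txg2] */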
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
uint64_t used, comp, uncomp;
bpobj_t bpo;
ASSERT(MUTEX_HELD(&dl->dl_lock));
VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
VERIFY0(bpobj_space(&bpo, &used, &comp, &uncomp));
bpobj_close(&bpo);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used += used;
dl->dl_phys->dl_comp += comp;
dl->dl_phys->dl_uncomp += uncomp;
dle_tofind.dle_mintxg = birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
dle_enqueue_subobj(dl, dle, obj, tx);
}
/*
* Prefetch metadata required for dsl_deadlist_insert_bpobj().
*/
static void
dsl_deadlist_prefetch_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
ASSERT(MUTEX_HELD(&dl->dl_lock));
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
dle_prefetch_subobj(dl, dle, obj);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, bp_freed, tx);
return (0);
}
/*
* Merge the deadlist pointed to by 'obj' into dl. obj will be left as
* an empty deadlist.
*/
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
zap_cursor_t zc, pzc;
zap_attribute_t *za, *pza;
dmu_buf_t *bonus;
dsl_deadlist_phys_t *dlp;
dmu_object_info_t doi;
int error, perror, i;
VERIFY0(dmu_object_info(dl->dl_os, obj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_t bpo;
VERIFY0(bpobj_open(&bpo, dl->dl_os, obj));
VERIFY0(bpobj_iterate(&bpo, dsl_deadlist_insert_cb, dl, tx));
bpobj_close(&bpo);
return;
}
za = kmem_alloc(sizeof (*za), KM_SLEEP);
pza = kmem_alloc(sizeof (*pza), KM_SLEEP);
mutex_enter(&dl->dl_lock);
/*
* Prefetch up to 128 deadlists first and then more as we progress.
* The limit is a balance between ARC use and diminishing returns.
*/
for (zap_cursor_init(&pzc, dl->dl_os, obj), i = 0;
(perror = zap_cursor_retrieve(&pzc, pza)) == 0 && i < 128;
zap_cursor_advance(&pzc), i++) {
dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
zfs_strtonum(pza->za_name, NULL));
}
for (zap_cursor_init(&zc, dl->dl_os, obj);
(error = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
dsl_deadlist_insert_bpobj(dl, za->za_first_integer,
zfs_strtonum(za->za_name, NULL), tx);
VERIFY0(zap_remove(dl->dl_os, obj, za->za_name, tx));
if (perror == 0) {
dsl_deadlist_prefetch_bpobj(dl, pza->za_first_integer,
zfs_strtonum(pza->za_name, NULL));
zap_cursor_advance(&pzc);
perror = zap_cursor_retrieve(&pzc, pza);
}
}
VERIFY3U(error, ==, ENOENT);
zap_cursor_fini(&zc);
zap_cursor_fini(&pzc);
VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
dlp = bonus->db_data;
dmu_buf_will_dirty(bonus, tx);
memset(dlp, 0, sizeof (*dlp));
dmu_buf_rele(bonus, FTAG);
mutex_exit(&dl->dl_lock);
kmem_free(za, sizeof (*za));
kmem_free(pza, sizeof (*pza));
}
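The merge above keeps a second ZAP cursor roughly 128 entries ahead of the consuming cursor. As a generic sketch of that pattern (next(), prefetch(), and process() are hypothetical stand-ins, not functions in this patch):

/* Prime the lookahead, then keep it a fixed distance ahead. */
for (i = 0; i < 128 && next(&ahead, &e) == 0; i++)
	prefetch(&e);
while (next(&work, &e) == 0) {
	process(&e);
	if (next(&ahead, &e2) == 0)
		prefetch(&e2);
}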
/*
* Remove entries on dl that are born > mintxg, and put them on the bpobj.
*/
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle, *pdle;
avl_index_t where;
int i;
ASSERT(!dl->dl_oldfmt);
mutex_enter(&dl->dl_lock);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
/*
* Prefetch up to 128 deadlists first and then more as we progress.
* The limit is a balance between ARC use and diminishing returns.
*/
for (pdle = dle, i = 0; pdle && i < 128; i++) {
bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
pdle = AVL_NEXT(&dl->dl_tree, pdle);
}
while (dle) {
uint64_t used, comp, uncomp;
dsl_deadlist_entry_t *dle_next;
bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
if (pdle) {
bpobj_prefetch_subobj(bpo, pdle->dle_bpobj.bpo_object);
pdle = AVL_NEXT(&dl->dl_tree, pdle);
}
VERIFY0(bpobj_space(&dle->dle_bpobj,
&used, &comp, &uncomp));
ASSERT3U(dl->dl_phys->dl_used, >=, used);
ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
VERIFY0(zap_remove_int(dl->dl_os, dl->dl_object,
dle->dle_mintxg, tx));
dle_next = AVL_NEXT(&dl->dl_tree, dle);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
dle = dle_next;
}
mutex_exit(&dl->dl_lock);
}
typedef struct livelist_entry {
blkptr_t le_bp;
uint32_t le_refcnt;
avl_node_t le_node;
} livelist_entry_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = &((livelist_entry_t *)larg)->le_bp;
const blkptr_t *r = &((livelist_entry_t *)rarg)->le_bp;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
uint64_t r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev != r_dva0_vdev)
return (TREE_CMP(l_dva0_vdev, r_dva0_vdev));
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
uint64_t r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
- if (l_dva0_offset == r_dva0_offset)
- ASSERT3U(l->blk_birth, ==, r->blk_birth);
return (TREE_CMP(l_dva0_offset, r_dva0_offset));
}
struct livelist_iter_arg {
avl_tree_t *avl;
bplist_t *to_free;
zthr_t *t;
};
/*
* Expects an AVL tree which is incrementally filled with FREE blkptrs
* and used to match up ALLOC/FREE pairs. ALLOC'd blkptrs without a
* corresponding FREE are stored in the supplied bplist.
*
- * Note that multiple FREE and ALLOC entries for the same blkptr may
- * be encountered when dedup is involved. For this reason we keep a
- * refcount for all the FREE entries of each blkptr and ensure that
+ * Note that multiple FREE and ALLOC entries for the same blkptr may be
+ * encountered when dedup or block cloning is involved. For this reason we
+ * keep a refcount for all the FREE entries of each blkptr and ensure that
* each of those FREE entries has a corresponding ALLOC preceding it.
*/
static int
dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
struct livelist_iter_arg *lia = arg;
avl_tree_t *avl = lia->avl;
bplist_t *to_free = lia->to_free;
zthr_t *t = lia->t;
ASSERT(tx == NULL);
if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
return (SET_ERROR(EINTR));
livelist_entry_t node;
node.le_bp = *bp;
livelist_entry_t *found = avl_find(avl, &node, NULL);
+ if (found) {
+ ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
+ ASSERT3U(BP_GET_CHECKSUM(bp), ==,
+ BP_GET_CHECKSUM(&found->le_bp));
+ ASSERT3U(BP_PHYSICAL_BIRTH(bp), ==,
+ BP_PHYSICAL_BIRTH(&found->le_bp));
+ }
if (bp_freed) {
if (found == NULL) {
/* first free entry for this blkptr */
livelist_entry_t *e =
kmem_alloc(sizeof (livelist_entry_t), KM_SLEEP);
e->le_bp = *bp;
e->le_refcnt = 1;
avl_add(avl, e);
} else {
- /* dedup block free */
- ASSERT(BP_GET_DEDUP(bp));
- ASSERT3U(BP_GET_CHECKSUM(bp), ==,
- BP_GET_CHECKSUM(&found->le_bp));
+ /*
+ * Deduped or cloned block free. We could assert the D bit
+ * for dedup, but there is no such bit for cloning.
+ */
ASSERT3U(found->le_refcnt + 1, >, found->le_refcnt);
found->le_refcnt++;
}
} else {
if (found == NULL) {
/* block is currently marked as allocated */
bplist_append(to_free, bp);
} else {
/* alloc matches a free entry */
ASSERT3U(found->le_refcnt, !=, 0);
found->le_refcnt--;
if (found->le_refcnt == 0) {
/* all tracked free pairs have been matched */
avl_remove(avl, found);
kmem_free(found, sizeof (livelist_entry_t));
- } else {
- /*
- * This is definitely a deduped blkptr so
- * let's validate it.
- */
- ASSERT(BP_GET_DEDUP(bp));
- ASSERT3U(BP_GET_CHECKSUM(bp), ==,
- BP_GET_CHECKSUM(&found->le_bp));
}
}
}
return (0);
}
/*
* Accepts a bpobj and a bplist. Will insert into the bplist the blkptrs
* which have an ALLOC entry but no matching FREE
*/
int
dsl_process_sub_livelist(bpobj_t *bpobj, bplist_t *to_free, zthr_t *t,
uint64_t *size)
{
avl_tree_t avl;
avl_create(&avl, livelist_compare, sizeof (livelist_entry_t),
offsetof(livelist_entry_t, le_node));
/* process the sublist */
struct livelist_iter_arg arg = {
.avl = &avl,
.to_free = to_free,
.t = t
};
int err = bpobj_iterate_nofree(bpobj, dsl_livelist_iterate, &arg, size);
VERIFY(err != 0 || avl_numnodes(&avl) == 0);
void *cookie = NULL;
livelist_entry_t *le = NULL;
while ((le = avl_destroy_nodes(&avl, &cookie)) != NULL) {
kmem_free(le, sizeof (livelist_entry_t));
}
avl_destroy(&avl);
return (err);
}
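A hedged usage sketch (bpo is assumed to be an already-opened bpobj_t; passing a NULL zthr is fine since the cancellation check above tolerates it):

	bplist_t to_free;
	uint64_t size = 0;

	bplist_create(&to_free);
	int err = dsl_process_sub_livelist(&bpo, &to_free, NULL, &size);
	/* on success, to_free holds the ALLOCs with no matching FREE */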
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, max_entries, U64, ZMOD_RW,
"Size to start the next sub-livelist in a livelist");
ZFS_MODULE_PARAM(zfs_livelist, zfs_livelist_, min_percent_shared, INT, ZMOD_RW,
"Threshold at which livelist is disabled");
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 599d7ffa0cf3..5809a832bcb0 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6236 +1,6233 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to each disk before
* moving on to the next top-level vdev.
*/
static uint64_t metaslab_aliquot = 1024 * 1024;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
* Of blocks of size >= metaslab_force_ganging, actually gang this
* percentage of them.
*/
uint_t metaslab_force_ganging_pct = 3;
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
uint_t zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
static const int zfs_metaslab_condense_block_threshold = 4;
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
static uint_t zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
* fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* become a speed bump in performance, where we keep switching the disks
* that we allocate from (e.g. we allocate some segments from disk A,
* pushing it past the threshold, while freeing segments from disk B
* brings its fragmentation back below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
static uint_t zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
static uint_t zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = B_FALSE;
/*
* When set will prevent metaslabs from being unloaded.
*/
static int metaslab_debug_unload = B_FALSE;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to a more aggressive
* strategy (i.e. search by size rather than by offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
uint_t metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
static const uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
static int metaslab_df_use_largest_segment = B_FALSE;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
/*
* Max number of metaslabs per group to preload.
*/
uint_t metaslab_preload_limit = 10;
/*
* Enable/disable preloading of metaslab.
*/
static int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
static int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
static int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
static int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
static int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
static int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
static const boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached, allowing for further investigation.
*/
static const uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
static const int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
static uint_t zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
static const uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
static uint_t zfs_metaslab_find_max_tries = 100;
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
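For example, allocator code elsewhere in this file bumps these counters as follows (this particular call site is illustrative):

	METASLABSTAT_BUMP(metaslabstat_try_hard);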
static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
* vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
* or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
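A worked example of the weighting above, with illustrative numbers: a 10G group at 20% fragmentation and a 30G group at 60% yield

	(20 * 10G + 60 * 30G) / (10G + 30G) = 2000G / 40G = 50

so the class-wide metric is 50%.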
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
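For instance (illustrative numbers), a vdev with 512M metaslabs (vdev_ms_shift = 29) and 1200M of raw headroom contributes

	P2ALIGN(1200M, 1ULL << 29) = 1024M

that is, only the two whole metaslabs that could actually be added.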
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
gethrtime() > msp->ms_selected_time +
(uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
* The allocatable flag is set to true if the capacity is below
* the zfs_mg_noalloc_threshold or has a fragmentation value that is
* greater than zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space or is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1,
vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
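/*
* For example (assuming a 1M metaslab_aliquot), a 10-wide raidz2
* top-level vdev has 8 data disks, so the rotor hands this group
* an 8M aliquot before moving on to the next group.
*/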
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
* (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
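*
* For illustration: a vdev with 200 metaslabs, 120 of which report
* metrics summing to 3600, yields 3600 / 120 == 30% fragmentation;
* with 100 or fewer valid metaslabs it would return ZFS_FRAG_INVALID
* instead.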
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
if (msp->ms_group != mg)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
int flags, uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal metaslab class and there are other metaslab
* groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
* Some allocations (e.g., those coming from device removal,
* where the allocations are not even counted in the metaslab
* allocation queues) are allowed to bypass the throttle.
*/
if (flags & METASLAB_DONT_THROTTLE)
return (B_TRUE);
/*
* Relax allocation throttling for ditto blocks. Due to random
* imbalances in allocation, copies tend to pile up on whichever
* single vdev looks slightly better at the moment.
*/
qmax = qmax * (4 + d) / 4;
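/*
* E.g. for the third DVA of a ditto block (d == 2) the limit is
* scaled by 6/4, so a queue depth cap of 64 becomes 96.
*/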
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
* the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
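/*
* Branch-free tie-break: when the sizes differ, !cmp is 0 and the
* size comparison wins; when they are equal, !cmp is 1 and the
* segments are ordered by start offset instead.
*/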
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
rs_set_start(&seg, rt, start);
rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
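/*
* A floor shift of 0 means no segment is too small to index, so
* the reloaded size tree reflects every segment in the range tree.
*/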
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
range_seg32_t, metaslab_rangesize32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
range_seg64_t, metaslab_rangesize64_compare)
/*
* Create any block allocator specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
bt_find_in_buf_f bt_find;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = metaslab_rangesize32_compare;
bt_find = metaslab_rt_find_rangesize32_in_buf;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = metaslab_rangesize64_compare;
bt_find = metaslab_rt_find_rangesize64_in_buf;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, compare, bt_find, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
(1ULL << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
static const range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
range_seg_t *rs;
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
#if defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
first_found = rs_get_start(rs, rt);
while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
uint64_t offset = rs_get_start(rs, rt);
if (offset + size <= rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
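/* No fit within the search budget; reset the cursor and fail. */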
*cursor = 0;
return (-1ULL);
}
#endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not prevent allocations of other sizes
* from landing in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
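/*
* size & -size isolates the lowest set bit: e.g. a 12K request
* (0x3000) yields 0x1000, so it shares the 4K-alignment cursor
* bucket with other 4K-aligned sizes.
*/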
range_tree_t *rt = msp->ms_allocatable;
uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
rt)) {
offset = rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_df_alloc
};
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made, advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
size)
return (-1ULL);
*cursor = rs_get_start(rs, rt);
*cursor_end = rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_cf_alloc
};
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
rs_set_start(&rsearch, rt, *cursor);
rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
t = &msp->ms_allocatable_by_size;
rs_set_start(&rsearch, rt, 0);
rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
*cursor = rs_get_start(rs, rt) + size;
return (rs_get_start(rs, rt));
}
return (-1ULL);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_ndf_alloc
};
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
* ms_id values are allocated sequentially, so full 64-bit
* division would be a waste of time; limit it to 32 bits.
*/
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
* Even though the smp_alloc field can in general go negative,
* that should never be the case for a metaslab's space map.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
range_tree_space(msp->ms_unflushed_allocs) -
range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
range_tree_space(msp->ms_defer[0]) +
range_tree_space(msp->ms_defer[1]));
msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
msp->ms_deferspace + range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
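*
* For example, with shift == 9 a range tree bucket for 4K segments
* (i == 12) lands in space map bucket idx == 3. Once idx saturates
* at SPACE_MAP_HISTOGRAM_SIZE - 1, larger buckets are folded into
* the last one with their counts scaled by 2^(i - idx - shift), so
* the total amount of space represented is preserved.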
*/
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
/*
* Called at every sync pass in which the metaslab gets synced.
*
* The reason is that we want our auxiliary histograms to be updated
* whenever the metaslab's space map histogram is updated. This way
* we stay consistent on which parts of the metaslab space map's
* histogram are currently not available for allocations (e.g. because
* they are in the defer, freed, and freeing trees).
*/
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
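/*
* TXG_DEFER_SIZE is 2, so consecutive txgs simply alternate
* between the two ms_deferhist slots, mirroring the rotation of
* the ms_defer trees themselves.
*/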
if (defer_allowed) {
memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
* or the space map's depending whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* some extra verification for in-core tree if you can */
if (msp->ms_loaded) {
range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. To avoid
* degrading performance we don't want to spend too much time in this
* loop, and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
uint_t tries = 0;
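/*
* Keep evicting while the btree leaf cache footprint (inuse * size)
* exceeds the configured share of all memory; e.g. with 16G of RAM
* and a 25% zfs_metaslab_mem_limit, eviction proceeds while the
* cache holds more than 4G. The tries bound keeps this loop cheap.
*/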
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
#else
(void) mc, (void) zfs_metaslab_mem_limit;
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
&arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_remove, msp->ms_allocatable);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
* pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
range_tree_walk(msp->ms_freed,
range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees.
* This is not because it read them through the ms_sm, but
* because these segments are part of ms_unflushed_frees,
* which we added to ms_allocatable earlier in this code
* path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)range_tree_space(msp->ms_freed),
(u_longlong_t)range_tree_space(msp->ms_defer[0]),
(u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab, if that's
* the case just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
* In case we were waiting for the metaslab to be flushed
* (where we temporarily dropped the ms_lock), ensure that
* no one else loaded the metaslab somehow.
*/
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
* We explicitly recalculate the metaslab's weight based on its space
* map (as it is now not loaded). We want unloaded metaslabs to always
* have their weights calculated from the space map histograms, while
* loaded ones have it calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
* available in-core, whether it is loaded or not.
*
* If ms_group == NULL, we came here from metaslab_fini(),
* at which point it doesn't make sense for us to do the recalculation
* and the sorting.
*/
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
* vdev_ms_shift - vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
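*
* For example, a 16G metaslab (ms_shift == 34) on an ashift-9 vdev
* spans 2^25 sectors, so 32-bit offsets (RANGE_SEG32) suffice.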
*/
range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
return (RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
return (RANGE_SEG64);
}
}
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
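/*
* E.g. with vdev_ms_shift == 34, metaslab id 3 covers the 16G
* region [3 << 34, 4 << 34) of the vdev's address space.
*/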
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for them. For
* readonly pools there is no need to open the space map object.
*
* Note:
* When called from vdev_expand(), we can't call into the DMU as
* we are holding the spa_config_lock as a writer and we would
* deadlock [see relevant comment in vdev_metaslab_init()]. In
* that case, the object parameter is zero though, so we won't
* call into the DMU.
*/
if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
!spa->spa_read_spacemaps)) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_SIZE; t++) {
ms->ms_allocating[t] = range_tree_create(NULL, type,
NULL, start, shift);
}
ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
metaslab_unflushed_dirty(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_allocs);
range_tree_destroy(msp->ms_checkpointing);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
range_tree_vacate(msp->ms_trim, NULL, NULL);
range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0 /* 16M */
};
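/*
 * Illustrative sketch (hypothetical helper, not part of the build): how
 * the table above folds a space map histogram into a single percentage,
 * mirroring the loop in metaslab_set_fragmentation() below. For example,
 * 32MB of free space in 8K segments (factor 90) plus 32MB in 16M segments
 * (factor 0) yields (32 * 90 + 32 * 0) / 64 = 45% fragmentation.
 */
#if 0
static uint64_t
zfs_frag_metric_sketch(const uint64_t *histogram, int nbuckets,
    uint8_t sm_shift)
{
	uint64_t fragmentation = 0, total = 0;

	for (int i = 0; i < nbuckets; i++) {
		/* Bucket i holds histogram[i] segments of 2^(i + shift). */
		uint64_t space = histogram[i] << (i + sm_shift);
		int idx = MIN(sm_shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);
		total += space;
		fragmentation += space * zfs_frag_table[idx];
	}
	return (total > 0 ? fragmentation / total : 0);
}
#endif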
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
* been upgraded and does not support this metric. Otherwise, the value
* set here is in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
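/*
 * Worked example (illustrative): with vdev_ms_count = 200, the formula
 * below keeps weight = 2*space for metaslab 0, yields about 1.5*space
 * for metaslab 100, and just over 1*space for metaslab 199.
 */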
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
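/*
 * E.g. one segment counted in a bucket above max_idx is carried
 * down as two segments one bucket lower, four segments two buckets
 * lower, and so on, since a segment of size 2^(i+1) spans two
 * segments of size 2^i.
 */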
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Calculate the weight based on the on-disk histogram. Should be applied
* only to unloaded metaslabs (i.e. no incoming allocations) in order to
* give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram, that are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
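/*
 * Worked example (illustrative): a metaslab whose largest free segments
 * sit in the 2^24 (16MB) bucket with 7 such segments encodes index 24
 * and count 7 via the WEIGHT_SET_* macros (metaslab_impl.h). Since the
 * index lives in higher-order bits than the count, comparing weights
 * numerically prefers larger segments over more of them.
 */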
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* This case will usually but not always get caught by the checks below;
* metaslabs can be loaded by various means, including the trim and
* initialize code. Once that happens, without this check they are
* allocatable even before they finish their first txg sync.
*/
if (unlikely(msp->ms_new))
return (B_FALSE);
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
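/*
 * E.g. an index of 20 means the metaslab has free segments in the
 * 1MB-2MB range, so a 1.5MB allocation (asize < 2^21) is worth
 * attempting.
 */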
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* note: we preserve the mask (e.g. indication of primary, etc.) */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, doesn't mean that this
* metaslab is activated for our allocator or with our requested
* activation weight. The metaslab could have started
* as an active one for our allocator but changed allocators
* while we were waiting to grab its ms_lock or we stole it
* [see find_valid_metaslab()]. This means that there is a
* possibility that this thread passivates a metaslab that belongs to
* another allocator or was activated with a different mask.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
ASSERT0(range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled)
return;
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0))
!= TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (e.g. with zfs_condense_pct = 110 and an optimal size of 1MB,
* condense once the on-disk size reaches 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
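/*
 * Worked example (illustrative): with zfs_condense_pct = 110 and an
 * optimal representation of 1MB, a space map that has grown to 2MB on
 * disk satisfies the first condition (2MB >= 1.1MB); it is condensed
 * only if 2MB also exceeds the block-count floor derived from
 * zfs_metaslab_condense_block_threshold below.
 */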
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty because we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
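/*
 * Concretely, the condensed space map written below reads as a single
 * ALLOC record covering the whole metaslab, followed by FREE records
 * for ms_allocatable and for the condense tree.
 */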
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
condense_tree = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock thus allowing other
* consumers to change its contents. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except in the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
shift);
range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
range_tree_vacate(tmp_tree, NULL, NULL);
range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
static void
metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
mutex_enter(&spa->spa_flushed_ms_lock);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, B_TRUE);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
}
void
metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, dirty);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* update log space map summary */
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
ms_prev_flushed_dirty);
spa_log_summary_add_flushed_metaslab(spa, dirty);
/* cleanup obsolete logs if any */
spa_cleanup_old_sm_logs(spa, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
/*
* Just because a metaslab got flushed, that doesn't mean that
* it will pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
metaslab_unflushed_txg(msp) == 0)
return;
metaslab_unflushed_bump(msp, tx, B_FALSE);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should work in that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We cannot flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
ASSERT(range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
ASSERT0(range_tree_space(alloctree));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
ASSERT0(range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, is loaded, and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
if (range_tree_is_empty(alloctree) &&
range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
if (metaslab_unflushed_txg(msp) == 0)
metaslab_unflushed_add(msp, tx);
else if (!metaslab_unflushed_dirty(msp))
metaslab_unflushed_bump(msp, tx, B_TRUE);
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
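/*
 * Net this txg's allocs and frees against the pending unflushed
 * changes: a new alloc cancels any overlapping unflushed free (and
 * a new free cancels any overlapping unflushed alloc); whatever
 * does not cancel out is recorded as unflushed.
 */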
range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
msp->ms_allocated_space += range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
range_tree_space(msp->ms_freeing));
msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
if (!range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
range_tree_vacate(msp->ms_freeing,
range_tree_add, msp->ms_freed);
}
msp->ms_allocated_this_txg += range_tree_space(alloctree);
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
VERIFY0(range_tree_space(msp->ms_freed));
}
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
* have a consistent view at the in-core side of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
* ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
if (!defer_allowed) {
range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
}
} else {
range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
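/*
 * With TXG_DEFER_SIZE of 2, space freed in txg N is handed back
 * to ms_allocatable only once txg N+2 syncs (when deferral is
 * allowed).
 */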
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
range_tree_swap(&msp->ms_freed, defer_tree);
} else {
range_tree_vacate(msp->ms_freed,
msp->ms_loaded ? range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
* is no longer active since we dirty metaslabs as we remove
* a device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, try to allocate it
* on a different metaslab than the existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
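/*
 * Lock-free ratchet: raise this allocator's cached max queue depth
 * toward the group-wide limit one step at a time, retrying the CAS
 * if another thread raced us, and bump the class-wide max slot
 * count on success.
 */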
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab, which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
uint_t tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled,
* skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
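/*
 * The "search" argument above acts as a resume sentinel: after examining a
 * metaslab we copy its sort key into the sentinel and bump ms_start by one,
 * so the next avl_find()/avl_nearest(AVL_AFTER) restarts strictly after the
 * last metaslab considered. A minimal stand-alone sketch of that idea over
 * a sorted array; the comparator and types are illustrative, not the actual
 * metaslab comparator.
 */
#include <stddef.h>
#include <stdint.h>
struct ms_key {
	uint64_t weight;	/* sorted descending */
	uint64_t start;		/* tie-break, ascending */
};
static int
ms_key_cmp(const struct ms_key *a, const struct ms_key *b)
{
	if (a->weight != b->weight)
		return (a->weight > b->weight ? -1 : 1);
	if (a->start != b->start)
		return (a->start < b->start ? -1 : 1);
	return (0);
}
/* Index of the first element ordered strictly after the sentinel key. */
static size_t
resume_after(const struct ms_key *arr, size_t n, const struct ms_key *key)
{
	size_t i = 0;
	while (i < n && ms_key_cmp(&arr[i], key) <= 0)
		i++;
	return (i);
}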
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to select
* a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST), we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function
* uses metaslab_weight() to set the weight, though,
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt; check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
- ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
metaslab_group_t *mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
if (psize >= metaslab_force_ganging &&
metaslab_force_ganging_pct > 0 &&
(random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try to spread them out on
* that vdev as much as possible. If that turns out not to be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
flags, psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
- ASSERT(mg->mg_initialized);
-
/*
* Avoid writing single-copy data to an unhealthy,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if (vd->vdev_state < VDEV_STATE_HEALTHY &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_ZIL) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
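/*
 * A stand-alone check of the bias ratios quoted in the comment above
 * (second example: V1 = 16M allocated of 128M, V2 = 64M of 128M). Plain
 * user-space C with byte-sized values; prints "ratio(V1) = 127%" and
 * "ratio(V2) = 72%", matching the table.
 */
#include <stdio.h>
#include <stdint.h>
int
main(void)
{
	int64_t v1_free = (int64_t)(128 - 16) << 20;	/* 112 MiB free */
	int64_t v2_free = (int64_t)(128 - 64) << 20;	/* 64 MiB free */
	int64_t mc_free = v1_free + v2_free;
	int64_t groups = 2;	/* two allocatable metaslab groups */
	printf("ratio(V1) = %lld%%\n",
	    (long long)(v1_free * groups * 100 / (mc_free + 1)));
	printf("ratio(V2) = %lld%%\n",
	    (long long)(v2_free * groups * 100 / (mc_free + 1)));
	return (0);
}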
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
if (range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We cannot remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BPs cannot be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks cannot be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BPs have no DVA to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
* encountered. rbca_remap_vd and rbca_remap_offset are updated within
* the callback as we go from one indirect vdev to the next (either
* concrete or indirect again) in that order.
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
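/*
 * A hedged consumer sketch for the callback above: spa_remap_blkptr() fires
 * the callback once per level of indirection with the pre-remap vdev id and
 * offset of dva[0]. The struct and function below are illustrative, not an
 * existing OpenZFS interface.
 */
typedef struct remap_trace {
	uint64_t rt_levels;	/* indirection levels traversed */
	uint64_t rt_bytes;	/* bytes seen across all levels */
} remap_trace_t;
static void
remap_trace_cb(uint64_t vdev, uint64_t offset, uint64_t size, void *arg)
{
	(void) vdev, (void) offset;
	remap_trace_t *rt = arg;
	rt->rt_levels++;
	rt->rt_bytes += size;
}
/*
 * Usage sketch:
 *	remap_trace_t rt = { 0 };
 *	if (spa_remap_blkptr(spa, bp, remap_trace_cb, &rt))
 *		bp->blk_dva[0] now names a concrete vdev; rt holds the trail.
 */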
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
* The potential race between _count() and _add() is covered
* by the allocator lock in most cases, or irrelevant due to
* GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
* But even if we assume some other non-existing scenario, the
* worst that can happen is few more I/Os get to allocation
* earlier, that is not a problem.
*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
}
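/*
 * Caller-side shape of the reservation protocol (illustrative; the real
 * consumer is the ZIO pipeline): reserve slots before allocating so that
 * in-flight allocations per allocator stay bounded, and unreserve them
 * when the I/O completes.
 *	if (!metaslab_class_throttle_reserve(mc, slots, allocator,
 *	    zio, flags)) {
 *		...throttle: requeue the zio until an I/O completes...
 *	}
 *	...allocate and issue the I/O...
 *	metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
 */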
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
!range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
* Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (bp->blk_birth <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
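/*
 * The txg == 0 recursion above makes the first pass a pure dry run: the
 * same code path validates that every DVA is claimable without dirtying
 * anything, so the real pass either starts from a fully claimable BP or
 * never begins. Caller-side shape (illustrative):
 *	if ((error = metaslab_claim(spa, bp, first_txg)) != 0)
 *		...the intent-log block is not claimable; handle the error...
 */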
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner, (void) arg;
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
* [see zil_sync()]. Thus, we don't call range_tree_verify() in the
* current allocating tree.
*/
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
* to wait until the metaslab group's mg_disabled_updating flag is no
* longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
* allocated blocks from being overwritten. This is used by
* initialize and TRIM, which are modifying unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
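/*
 * Disable/enable bracket as used by TRIM and initialize (shape only,
 * illustrative): block allocations from the metaslab while its unallocated
 * space is being manipulated, then re-enable, optionally waiting for the
 * outstanding txg to sync and unloading the metaslab.
 *	metaslab_disable(msp);
 *	...issue TRIM or initialize I/O against free ranges of msp...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);	 sync first, keep loaded
 */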
void
metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
{
ms->ms_unflushed_dirty = dirty;
}
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
boolean_t
metaslab_unflushed_dirty(metaslab_t *ms)
{
return (ms->ms_unflushed_dirty);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW,
"Max number of metaslabs per group to preload");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
"Blocks larger than this size are sometimes forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
"Percentage of large blocks that will be forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 1410651c63cc..d7fe96cde6a4 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,10213 +1,10491 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
* but with number of taskqs also scaling with number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
-static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
+static zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
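/*
 * For reference, the READ row expands via the macros defined above
 * (ZTI_N(n) is ZTI_P(n, 1)):
 *	ZTI_N(8)  -> { ZTI_MODE_FIXED, 8, 1 }
 *	ZTI_SCALE -> { ZTI_MODE_SCALE, 0, 1 }
 *	ZTI_NULL  -> { ZTI_MODE_NULL,  0, 0 }
 * i.e. eight fixed issue threads in a single taskq, interrupt taskqs that
 * scale with CPU count, and no high-priority taskqs for reads.
 */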
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
/*
* Percentage of all CPUs that can be used by the metaslab preload taskq.
*/
static uint_t metaslab_preload_pct = 50;
static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
static uint_t zio_taskq_batch_tpq; /* threads per taskq */
static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t zio_taskq_basedc = 80; /* base duty cycle */
static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
* This is used by zdb for spacemaps verification.
*/
boolean_t spa_mode_readable_spacemaps = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
static int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
uint64_t zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or out-dated.
*
* We are more tolerant for pools opened from a cachefile since we could have
* an out-dated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
static const boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
static int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
/*
* Add a user property (source=src, propname=propval) to an nvlist.
*/
static void
spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
zprop_source_t src)
{
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONEUSED, NULL,
brt_get_used(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONESAVED, NULL,
brt_get_saved(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONERATIO, NULL,
brt_get_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS; when
* opening pools created before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If there is no pool property object, there are no more properties to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) ==
ZPOOL_PROP_INVAL && !zfs_prop_user(za.za_name))
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
if (prop != ZPOOL_PROP_INVAL) {
spa_prop_add_list(*nvp, prop, strval, 0, src);
} else {
src = ZPROP_SRC_LOCAL;
spa_prop_add_user(*nvp, za.za_name, strval,
src);
}
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
const char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
/*
* Sanitize the input.
*/
if (zfs_prop_user(propname)) {
if (strlen(propname) >= ZAP_MAXNAMELEN) {
error = SET_ERROR(ENAMETOOLONG);
break;
}
if (strlen(fnvpair_value_string(elem)) >=
ZAP_MAXVALUELEN) {
error = SET_ERROR(E2BIG);
break;
}
} else if (zpool_prop_feature(propname)) {
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
} else {
error = SET_ERROR(EINVAL);
break;
}
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
const char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_INVAL &&
zfs_prop_user(nvpair_name(elem))) {
need_sync = B_TRUE;
break;
}
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
/*
* Clear the kobj flag from all the vdevs to allow
* vdev_cache_process_kobj_evt() to post events to all the
* vdevs since GUID is updated.
*/
vdev_clear_kobj_evt(spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
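/* TREE_ISIGN() collapses memcmp()'s result to -1/0/+1 for the AVL tree. */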
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want fewer for
* better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq to at most 100% so as not to trip an assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
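/*
 * value is the per-taskq share of zio_taskq_batch_pct, rounded to the
 * nearest integer; it is consumed via TASKQ_THREADS_CPU_PCT below.
 */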
value = (zio_taskq_batch_pct + count / 2) / count;
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
(void) zio_taskq_basedc;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
+#ifdef _KERNEL
+/*
+ * The READ and WRITE rows of zio_taskqs are configurable at module load time
+ * by setting zio_taskq_read or zio_taskq_write.
+ *
+ * Example (the defaults for READ and WRITE)
+ * zio_taskq_read='fixed,1,8 null scale null'
+ * zio_taskq_write='batch fixed,1,5 scale fixed,1,5'
+ *
+ * Each sets the entire row at a time.
+ *
+ * 'fixed' is parameterised: fixed,Q,T where Q is number of taskqs, T is number
+ * of threads per taskq.
+ *
+ * 'null' can only be set on the high-priority queues (queue selection for
+ * high-priority queues will fall back to the regular queue if the high-pri
+ * queue is NULL).
+ */
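+
+/*
+ * Illustrative usage (interface assumed, not shown in this hunk): on Linux
+ * these rows are set as module parameters, e.g.
+ *
+ *   modprobe zfs zio_taskq_write='batch fixed,1,5 scale fixed,1,5'
+ *
+ * while on FreeBSD the sysctl handlers defined below serve the same role.
+ */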
+static const char *const modes[ZTI_NMODES] = {
+ "fixed", "batch", "scale", "null"
+};
+
+/* Parse the incoming config string. Modifies cfg */
+static int
+spa_taskq_param_set(zio_type_t t, char *cfg)
+{
+ int err = 0;
+
+ zio_taskq_info_t row[ZIO_TASKQ_TYPES] = {{0}};
+
+ char *next = cfg, *tok, *c;
+
+ /*
+ * Parse out each element from the string and fill `row`. The entire
+ * row has to be set at once, so any errors are flagged by just
+ * breaking out of this loop early.
+ */
+ uint_t q;
+ for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ /* `next` is the start of the config */
+ if (next == NULL)
+ break;
+
+ /* Eat up leading space */
+ while (isspace(*next))
+ next++;
+ if (*next == '\0')
+ break;
+
+ /* Mode ends at space or end of string */
+ tok = next;
+ next = strchr(tok, ' ');
+ if (next != NULL) *next++ = '\0';
+
+ /* Parameters start after a comma */
+ c = strchr(tok, ',');
+ if (c != NULL) *c++ = '\0';
+
+ /* Match mode string */
+ uint_t mode;
+ for (mode = 0; mode < ZTI_NMODES; mode++)
+ if (strcmp(tok, modes[mode]) == 0)
+ break;
+ if (mode == ZTI_NMODES)
+ break;
+
+ /* Canary: leave the mode invalid unless per-mode setup succeeds */
+ row[q].zti_mode = ZTI_NMODES;
+
+ /* Per-mode setup */
+ switch (mode) {
+
+ /*
+ * FIXED is parameterised: number of queues, and number of
+ * threads per queue.
+ */
+ case ZTI_MODE_FIXED: {
+ /* No parameters? */
+ if (c == NULL || *c == '\0')
+ break;
+
+ /* Find next parameter */
+ tok = c;
+ c = strchr(tok, ',');
+ if (c == NULL)
+ break;
+
+ /* Take digits and convert */
+ unsigned long long nq;
+ if (!(isdigit(*tok)))
+ break;
+ err = ddi_strtoull(tok, &tok, 10, &nq);
+ /* Must succeed and also end at the next param sep */
+ if (err != 0 || tok != c)
+ break;
+
+ /* Move past the comma */
+ tok++;
+ /* Need another number */
+ if (!(isdigit(*tok)))
+ break;
+ /* Remember start to make sure we moved */
+ c = tok;
+
+ /* Take digits */
+ unsigned long long ntpq;
+ err = ddi_strtoull(tok, &tok, 10, &ntpq);
+ /* Must succeed, and moved forward */
+ if (err != 0 || tok == c || *tok != '\0')
+ break;
+
+ /*
+ * Sanity check: zero queues/threads make no sense, and
+ * 16K is almost certainly more than anyone will ever
+ * need; it also avoids silly numbers like UINT32_MAX.
+ */
+ if (nq == 0 || nq >= 16384 ||
+ ntpq == 0 || ntpq >= 16384)
+ break;
+
+ const zio_taskq_info_t zti = ZTI_P(ntpq, nq);
+ row[q] = zti;
+ break;
+ }
+
+ case ZTI_MODE_BATCH: {
+ const zio_taskq_info_t zti = ZTI_BATCH;
+ row[q] = zti;
+ break;
+ }
+
+ case ZTI_MODE_SCALE: {
+ const zio_taskq_info_t zti = ZTI_SCALE;
+ row[q] = zti;
+ break;
+ }
+
+ case ZTI_MODE_NULL: {
+ /*
+ * Can only null the high-priority queues; the general-
+ * purpose ones have to exist.
+ */
+ if (q != ZIO_TASKQ_ISSUE_HIGH &&
+ q != ZIO_TASKQ_INTERRUPT_HIGH)
+ break;
+
+ const zio_taskq_info_t zti = ZTI_NULL;
+ row[q] = zti;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* Ensure we set a mode */
+ if (row[q].zti_mode == ZTI_NMODES)
+ break;
+ }
+
+ /* Didn't get a full row, fail */
+ if (q < ZIO_TASKQ_TYPES)
+ return (SET_ERROR(EINVAL));
+
+ /* Eat trailing space */
+ if (next != NULL)
+ while (isspace(*next))
+ next++;
+
+ /* If there's anything left over then fail */
+ if (next != NULL && *next != '\0')
+ return (SET_ERROR(EINVAL));
+
+ /* Success! Copy it into the real config */
+ for (q = 0; q < ZIO_TASKQ_TYPES; q++)
+ zio_taskqs[t][q] = row[q];
+
+ return (0);
+}
+
+static int
+spa_taskq_param_get(zio_type_t t, char *buf, boolean_t add_newline)
+{
+ int pos = 0;
+
+ /* Build parameter string from live config */
+ const char *sep = "";
+ for (uint_t q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ const zio_taskq_info_t *zti = &zio_taskqs[t][q];
+ if (zti->zti_mode == ZTI_MODE_FIXED)
+ pos += sprintf(&buf[pos], "%s%s,%u,%u", sep,
+ modes[zti->zti_mode], zti->zti_count,
+ zti->zti_value);
+ else
+ pos += sprintf(&buf[pos], "%s%s", sep,
+ modes[zti->zti_mode]);
+ sep = " ";
+ }
+
+ if (add_newline)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+
+ return (pos);
+}
+
+#ifdef __linux__
+static int
+spa_taskq_read_param_set(const char *val, zfs_kernel_param_t *kp)
+{
+ char *cfg = kmem_strdup(val);
+ int err = spa_taskq_param_set(ZIO_TYPE_READ, cfg);
+ kmem_free(cfg, strlen(val)+1);
+ return (-err);
+}
+static int
+spa_taskq_read_param_get(char *buf, zfs_kernel_param_t *kp)
+{
+ return (spa_taskq_param_get(ZIO_TYPE_READ, buf, TRUE));
+}
+
+static int
+spa_taskq_write_param_set(const char *val, zfs_kernel_param_t *kp)
+{
+ char *cfg = kmem_strdup(val);
+ int err = spa_taskq_param_set(ZIO_TYPE_WRITE, cfg);
+ kmem_free(cfg, strlen(val)+1);
+ return (-err);
+}
+static int
+spa_taskq_write_param_get(char *buf, zfs_kernel_param_t *kp)
+{
+ return (spa_taskq_param_get(ZIO_TYPE_WRITE, buf, TRUE));
+}
+#else
+/*
+ * On FreeBSD, load-time parameters can be set up before malloc() is available,
+ * so we have to do all the parsing work on the stack.
+ */
+#define SPA_TASKQ_PARAM_MAX (128)
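+/*
+ * Back-of-envelope sizing: the longest row is four "fixed,16383,16383"
+ * entries (17 chars each) plus three separators and a NUL, i.e.
+ * 4*17 + 3 + 1 = 72 bytes, so 128 is comfortably enough.
+ */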
+
+static int
+spa_taskq_read_param(ZFS_MODULE_PARAM_ARGS)
+{
+ char buf[SPA_TASKQ_PARAM_MAX];
+ int err;
+
+ (void) spa_taskq_param_get(ZIO_TYPE_READ, buf, FALSE);
+ err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
+ if (err || req->newptr == NULL)
+ return (err);
+ return (spa_taskq_param_set(ZIO_TYPE_READ, buf));
+}
+
+static int
+spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
+{
+ char buf[SPA_TASKQ_PARAM_MAX];
+ int err;
+
+ (void) spa_taskq_param_get(ZIO_TYPE_WRITE, buf, FALSE);
+ err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
+ if (err || req->newptr == NULL)
+ return (err);
+ return (spa_taskq_param_set(ZIO_TYPE_WRITE, buf));
+}
+#endif
+#endif /* _KERNEL */
+
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose which taskq at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
spa->spa_normal_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, &zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
(void) spa_create_process;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_healed,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_activate_os(spa);
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* The taskq to preload metaslabs.
*/
spa->spa_metaslab_taskq = taskq_create("z_metaslab",
metaslab_preload_pct, maxclsyspri, 1, INT_MAX,
TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_metaslab_taskq) {
taskq_destroy(spa->spa_metaslab_taskq);
spa->spa_metaslab_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
avl_destroy(&spa->spa_errlist_healed);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
spa_deactivate_os(spa);
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that sets the flag instructing spa_sync to attempt
* to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
log_summary_entry_t *e;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
VERIFY0(e->lse_mscount);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If we have set the spa_final_txg, we have already performed the
* tasks below in spa_export_common(). We should not redo it here since
* we delay the final TXGs beyond what spa_final_txg is set at.
*/
if (spa->spa_final_txg == UINT64_MAX) {
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev,
VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
taskq_wait(spa->spa_metaslab_taskq);
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
brt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
if (spa->spa_spares.sav_vdevs) {
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
if (spa->spa_l2cache.sav_vdevs) {
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
if (spa->spa_spares.sav_vdevs) {
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
}
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
spa->spa_spares.sav_count);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, sav->sav_count);
out:
/*
* Purge vdevs that were dropped
*/
if (oldvdevs) {
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
}
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
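/*
 * Read a packed nvlist from the given MOS object (its packed size is kept
 * in the object's bonus buffer) and unpack it into *value.
 */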
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
nv = fnvlist_alloc();
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device as missing only if it failed
* to open (i.e. offline or faulted devices are not
* considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
boolean_t sle_verify_data;
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* The maximum number of inflight bytes is the ARC size shifted right by
* spa_load_verify_shift. By default, we set it to 1/16th of the ARC.
*/
static uint_t spa_load_verify_shift = 4;
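/*
 * Worked example (illustrative): with a 32 GiB ARC target and the default
 * shift of 4, at most 32 GiB >> 4 = 2 GiB of verify I/O may be in flight.
 */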
static int spa_load_verify_metadata = B_TRUE;
static int spa_load_verify_data = B_TRUE;
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zio_t *rio = arg;
spa_load_error_t *sle = rio->io_private;
(void) zilog, (void) dnp;
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
/*
* Sanity check the block pointer in order to detect obvious damage
* before using the contents in subsequent checks or in zio_read().
* When damaged consider it to be a metadata error since we cannot
* trust the BP_GET_TYPE and BP_GET_LEVEL values.
*/
if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
atomic_inc_64(&sle->sle_meta_count);
return (0);
}
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
if (!BP_IS_METADATA(bp) &&
(!spa_load_verify_data || !sle->sle_verify_data))
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp, (void) arg;
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
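/*
* Walk the pool as dictated by the load policy, issuing reads to verify
* metadata (and, when requested, data). The resulting error counts are
* compared against the policy limits to decide whether the load may
* proceed.
*/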
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
policy.zlp_maxmeta == UINT64_MAX)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
/*
* Verify data only if we are rewinding or an error limit was set.
* Otherwise nothing but dbgmsg cares about it, so don't waste the time.
*/
sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
(policy.zlp_maxdata < UINT64_MAX);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
spa->spa_load_txg_ts);
fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
loss);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
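/*
* Fetch the first entry from the given ZAP of livelists, returning its
* object number in *llp.
*/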
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
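/*
* Syncing-context portion of livelist deletion: remove the livelist from
* the ZAP of deleted clones and, once the last one is gone, destroy the
* ZAP itself and notify any waiters.
*/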
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load in the value for the livelist to be removed and open it. Then,
* load its first sublist and determine which block pointers should actually
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
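/*
* Iteration callback used while condensing: block pointers appended to
* the livelist after the zthr began are sorted into separate alloc and
* free bplists so they continue to be tracked.
*/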
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
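/*
* Syncing-context portion of livelist condensing: pick up any block
* pointers added while the zthr was running, clear the first entry,
* remove the second, and write the condensed result back out.
*/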
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
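/*
* zthr callback which condenses two adjacent livelist entries: the block
* pointers are matched (FREEs against ALLOCs) in open context, then a
* synctask is dispatched to write out the condensed entry.
*/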
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both records the number of
* block pointers and iterates over them while holding the bpobj's lock,
* so the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign the tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
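/*
* Load an existing storage pool, using the configuration in spa_config.
* This wraps spa_load_impl() with import-progress updates and, on
* failure, ereport posting.
*/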
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
const char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
vd->vdev_root_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_root_zap));
}
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#else
#define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check. This is used by zdb, which
* is intended to be run on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid from
* the configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Number of nanoseconds for which the activity check must watch for
* changes on disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from configuration read from disk store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
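/*
* Verify that the pool was last accessed by this host: compare the hostid
* recorded in the MOS config against ours and fail the load (EBADF) on a
* mismatch.
*/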
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
const char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
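/*
* Sanity check the configuration handed to spa_load, make sure a pool
* with the same guid is not already imported, and parse the config into
* an (as yet untrusted) vdev tree.
*/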
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
const char *comment;
const char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case that we allow an already imported pool to be
* imported again, is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
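/*
* Select the best uberblock from the vdev labels, perform the multihost
* activity check when required, and verify that every feature needed to
* read the pool is supported.
*/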
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
spa->spa_label_features = fnvlist_dup(features);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
unsup_feat = fnvlist_alloc();
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
fnvlist_add_string(unsup_feat,
nvpair_name(nvp), "");
}
}
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
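/*
* Replace the untrusted vdev tree with one built from the config stored
* in the MOS, preserving the (possibly more current) vdev paths from the
* provided config, then reopen and revalidate the trusted tree.
*/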
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner hasn't been verified yet,
* so do the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If 'zpool import' used a cached config, then the on-disk hostid and
* hostname may differ from the cached config in ways that should
* prevent import. Userspace can't discover this without a scan, but
* we know, so we add these values to LOAD_INFO so the caller can know
* the difference.
*
* Note that we have to do this before the config is regenerated,
* because the new config will have the hostid and hostname for this
* host, in readiness for import.
*/
if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTID))
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_HOSTID,
fnvlist_lookup_uint64(mos_config, ZPOOL_CONFIG_HOSTID));
if (nvlist_exists(mos_config, ZPOOL_CONFIG_HOSTNAME))
fnvlist_add_string(spa->spa_load_info, ZPOOL_CONFIG_HOSTNAME,
fnvlist_lookup_string(mos_config, ZPOOL_CONFIG_HOSTNAME));
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we have the config from the MOS, we can be stricter
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concreted vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
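/*
* Retrieve pool-wide state stored in the MOS: the checksum salt, the
* deferred-frees bpobj, the error logs, pool history, the per-vdev ZAP
* map, and the pool properties.
*/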
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
nvlist_free(mos_config);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
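/*
 * Load the on-disk deduplication tables (DDTs), failing the load if they
 * cannot be read.
 */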
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
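/*
 * Load the Block Reference Table (BRT), which tracks blocks shared via
 * block cloning.
 */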
static int
spa_ld_load_brt(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = brt_load(spa);
if (error != 0) {
spa_load_failed(spa, "brt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
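/*
 * Verify that the intent logs are healthy before we start claiming log
 * blocks. If any logs are missing, either drop them (when other top-level
 * vdevs are already missing) or fail the load.
 */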
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it back to the original value before
* returning as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
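/*
 * If the pool has a checkpoint, read the checkpointed uberblock from the
 * MOS and record its txg and timestamp. A missing checkpoint entry
 * (ENOENT) is not an error.
 */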
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
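/*
 * Perform the initial stages of a pool load: parse the provided config,
 * open and validate the vdevs, select the best uberblock and use it to
 * open the MOS.
 */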
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
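/*
 * Open the MOS from the provided (untrusted) config, then fetch the
 * trusted config stored in the MOS and redo the load with it if the two
 * configs differ too much.
 */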
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint, update the config
* cache anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
error = spa_ld_load_brt(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (of which the livelist
* deletion zthr is one).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
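/*
 * Retry loading the pool from an earlier txg by capping spa_load_max_txg
 * just below the currently selected uberblock and reactivating the spa.
 */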
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks.
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time we open the pool, without having to keep around the spa_t in
* some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, const void *tag,
nvlist_t *nvpolicy, nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), one of the vdev labels indicates that the
* pool has been exported or destroyed. If
* this is the case, the config cache is out of sync and
* we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
*config = fnvlist_dup(spa->spa_config);
fnvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER && config != NULL) {
fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
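/*
 * Open a pool, possibly rewinding it to an earlier txg according to the
 * given load policy.
 */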
int
spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
nvlist_t *policy, nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, const void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
/*
* Look up the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spare device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
if (nspares != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t * const *)spares, nspares);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares));
/*
* Go through and find any spares which have since been
* repurposed as active spares. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
guid = fnvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID);
VERIFY0(nvlist_lookup_uint64_array(spares[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
} else {
vs->vs_state =
spa->spa_spares.sav_vdevs[i]->vdev_state;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
if (nl2cache != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, nl2cache);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache));
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
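/*
 * Read feature reference counts from the for_read and for_write feature
 * objects in the MOS and add them to the 'features' nvlist.
 */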
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
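/*
 * Refresh feature reference counts from the in-core refcounts, which
 * avoids issuing I/O to a potentially suspended pool.
 */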
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
fnvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_approx_errlog_size(spa));
if (spa_suspended(spa)) {
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
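/*
 * Validate both the spare and l2cache device arrays specified in 'nvroot'.
 */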
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
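/*
 * Set the aux device list for 'sav', concatenating 'devs' onto any
 * existing device list.
 */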
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs));
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
newdevs[i] = fnvlist_dup(olddevs[i]);
for (i = 0; i < ndevs; i++)
newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
fnvlist_remove(sav->sav_config, config);
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)newdevs, ndevs + oldndevs);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
sav->sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)devs, ndevs);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
const char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
const char *feat_name;
const char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP));
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
/*
* Create BRT table and BRT table object.
*/
brt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
spa_import_os(spa);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
const char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to the caller (e.g. rewind info, missing devices, etc).
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it is no longer valid and
* conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
fnvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES);
else
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
fnvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE);
else
spa->spa_l2cache.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
spa_import_os(spa);
return (0);
}
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
const char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
/*
* spa_import() relies on a pool config fetched by spa_tryimport()
* for spare/cache devices. Import flags are not passed to
* spa_tryimport(), so it would otherwise return early due to a
* missing log device and never retrieve the cache and spare devices.
* Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
* the correct configuration regardless of the missing log device.
*/
spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp);
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
dsname);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
vdev_t *rvd = spa->spa_root_vdev;
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools from stealing the active spare
* from an exported pool. The user may still force the export,
* however.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time. This has to be
* done before we set the spa_final_txg, otherwise
* spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
* spa_should_flush_logs_on_unload() should be called after
* spa_state has been set to the new_state.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
spa_export_os(spa);
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
*oldconfig = fnvlist_dup(spa->spa_config);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the dRAID feature flag.
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
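/*
 * Extract the optional spare and l2cache device lists from nvroot;
 * the absence of either simply means none were requested.
 */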
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
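/*
 * Transplant each new top-level vdev from the temporary root 'vd'
 * onto the pool's root vdev and mark its config dirty.
 */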
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
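/*
 * Sequential rebuilds and healing resilvers are mutually exclusive:
 * a rebuild requires SPA_FEATURE_DEVICE_REBUILD and no resilver may
 * be active or scheduled, and vice versa.
 */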
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
}
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Log, dedup, and special vdevs should not be replaced by spares.
*/
if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
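/* Allocate room for the path plus "/old" and the terminating NUL. */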
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing or a spare vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from userland through the
* ioctl interface, spa_vdev_detach() can potentially be called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in the case where we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function, everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* the thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_UNINIT &&
vd->vdev_initialize_thread != NULL) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
case POOL_INITIALIZE_UNINIT:
vdev_uninitialize(vd);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
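/*
 * Issue the initialize command to each vdev GUID in 'nv'; individual
 * failures are recorded in 'vdev_errlist' rather than aborting.
 */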
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but the thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
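/*
 * As with initializing, issue the TRIM command to each vdev GUID in
 * 'nv' and record individual failures in 'vdev_errlist'.
 */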
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
const char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
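/*
 * A valid split config must name every top-level vdev up to, but
 * not including, any trailing run of log/hole vdevs; 'lastlog'
 * marks where that trailing run begins.
 */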
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
nvl = fnvlist_alloc();
fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL));
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
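/*
 * Fault injection point so the test suite can simulate a crash at
 * each stage of the split (see zio_handle_panic_injection()).
 */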
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
newspa->spa_config_splitting = fnvlist_alloc();
fnvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
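/*
 * Error path: tear down the partially-created pool, bring the
 * original disks back online, and restart any interrupted
 * initialize/TRIM activity.
 */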
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there is more than one spare attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified. In that case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB &&
!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
return (SET_ERROR(ENOTSUP));
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static __attribute__((noreturn)) void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
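/*
 * Post autoexpand events for any leaf vdevs whose underlying
 * devices may have grown.
 */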
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE ||
tasks & SPA_ASYNC_DETACH_SPARE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
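/*
 * Cancel the auxiliary zthrs; spa_async_resume() will resume them.
 */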
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
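/*
 * If the last config cache write failed, suppress the CONFIG_UPDATE
 * task until zfs_ccw_retry_interval seconds have elapsed since the
 * failure.
 */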
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
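/*
 * Record the unpadded nvlist size in the object's bonus buffer so
 * readers know how many bytes to unpack.
 */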
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
nvroot = fnvlist_alloc();
if (sav->sav_count == 0) {
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)NULL, 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)list, sav->sav_count);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_root_zap != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_root_zap, tx));
}
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
const char *strval, *fname;
zpool_prop_t prop;
const char *propname;
const char *elemname = nvpair_name(elem);
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(elemname)) {
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_INVAL:
if (zpool_prop_feature(elemname)) {
fname = strchr(elemname, '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", elemname);
break;
} else if (!zfs_prop_user(elemname)) {
ASSERT(zpool_prop_feature(elemname));
break;
}
zfs_fallthrough;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
if (prop == ZPOOL_PROP_INVAL) {
propname = elemname;
proptype = PROP_TYPE_STRING;
} else {
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", elemname,
(longlong_t)intval);
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
} else {
ASSERT(0); /* not allowed */
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the ability to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
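/*
 * Worked example (not part of the original change; module defaults assumed):
 * with zfs_vdev_async_write_max_active == 10 and
 * zfs_vdev_queue_depth_pct == 1000, each eligible metaslab group gets
 * mg_max_alloc_queue_depth == 10 * 1000 / 100 == 100, while each of its
 * allocators starts from zfs_vdev_def_queue_depth per group.
 */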
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We cannot defer frees in pass 1, because the
* deferred frees are themselves synced out later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
brt_sync(spa, txg);
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
dsl_errorscrub_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
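/*
 * Illustrative note (not part of the original change): a txg usually
 * converges in a small number of passes. Pass 1 does the bulk of the work;
 * later passes exist only because syncing can itself dirty the MOS (e.g.
 * via space map updates), so the loop repeats until the MOS stays clean.
 */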
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Now that no more cloning can occur in this transaction group,
* and before any frees are issued, we can process the pending
* BRT updates.
*/
brt_pending_apply(spa, txg);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
/* Avoid holding the write lock unless actually necessary */
if (vd->vdev_aux == NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
continue;
}
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
vdev_autotrim_kick(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
static boolean_t
spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
{
(void) spa;
int i;
uint64_t vdev_guid;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&vdev_guid) == 0 && vdev_guid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
boolean_t
spa_has_l2cache(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2: once as a spare and
* once as a replacement.
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity-specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
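/*
 * Illustrative sketch (not part of the original change) of the
 * check-then-wait pattern described above; activity_in_progress() stands in
 * for an activity-specific predicate such as the ones used by
 * spa_activity_in_progress() below:
 *
 *	mutex_enter(&spa->spa_activities_lock);
 *	while (activity_in_progress(spa))
 *		cv_wait(&spa->spa_activities_cv, &spa->spa_activities_lock);
 *	mutex_exit(&spa->spa_activities_lock);
 *
 * Because spa_notify_waiters() takes spa_activities_lock before its
 * cv_broadcast(), the wakeup cannot slip between the check and the cv_wait.
 */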
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
zfs_fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete.
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
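/*
 * Usage sketch (not part of the original change): a "zpool wait -t scrub"
 * request reaches this code roughly as
 *
 *	boolean_t waited;
 *	int error = spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited);
 *
 * where "tank" is a hypothetical pool name; on return, waited reports
 * whether the caller actually blocked.
 */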
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#else
(void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#else
(void) ev;
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
"Percentage of CPUs to run a metaslab preload taskq");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
ZMOD_RW, "Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
ZMOD_RW, "Set the livelist condense synctask to pause");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
+
+#ifdef _KERNEL
+ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
+ spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RD,
+ "Configure IO queues for read IO");
+ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
+ spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RD,
+ "Configure IO queues for write IO");
+#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 72b690162d64..24f038ad7f4b 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -1,3005 +1,3006 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2023, Klara Inc.
*/
#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>
/*
* SPA locking
*
* There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to look up a spa_t by name.
*
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
* or zio_write_phys() -- the caller must ensure that the config cannot
* change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
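/*
 * Illustrative sketch (not part of the original change) of the two patterns
 * implied by the rules above. A reader doing a trivial inquiry:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... inspect the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * A topology change, via the convenience wrappers:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	error = spa_vdev_exit(spa, newvd, txg, error);
 */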
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
* Everything except dprintf, set_error, spa, and indirect_remap is on
* by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
uint64_t zfs_deadman_synctime_ms = 600000UL; /* 10 min. */
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
uint64_t zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
uint64_t zfs_deadman_checktime_ms = 60000UL; /* 1 min. */
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = B_TRUE;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
const char *zfs_deadman_failmode = "wait";
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
uint_t spa_asize_inflation = 24;
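/*
 * Worked out (not part of the original change): with
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, the bound above is
 * (3 + 1) * 3 * 2 == 24, which is where the default of 24 comes from.
 */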
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed (bounded by spa_max_slop). This ensures that we
* don't run the pool completely out of space, due to unaccounted changes (e.g.
* to the MOS). It also limits the worst-case time to allocate space. If we
* have less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
* also part of this 3.2% of space which can't be consumed by normal writes;
* the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
* log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* Operations that are almost guaranteed to free up space in the absence of
* a pool checkpoint can use up to three quarters of the slop space
* (e.g. zfs destroy).
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
* increase in the amount of space used, it is possible to run the pool
* completely out of space, causing it to be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* Further, on very large pools, the slop space will be smaller than
* 3.2%, to avoid reserving much more space than we actually need; bounded
* by spa_max_slop (128GB).
*
* See also the comments in zfs_space_check_t.
*/
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
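/*
 * Worked example (not part of the original change): with spa_slop_shift == 5
 * the reserved fraction is 1/(2^5) == 1/32, i.e. ~3.2%. On a 10 TB pool that
 * would be ~320 GB, so it is clamped to spa_max_slop (128 GB); on a 1 GB pool
 * 1/32 is only 32 MB, so it is raised toward spa_min_slop (128 MB), but never
 * above half the pool size.
 */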
static const int spa_allocators = 4;
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*
* By default dedup and user data indirects land in the special class
*/
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;
/*
* The percentage of special class final space reserved for metadata only.
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
* let metadata into the class.
*/
static uint_t zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
scl->scl_count = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
ASSERT(scl->scl_count == 0);
}
}
int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (scl->scl_count != 0) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
return (1);
}
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
int mmp_flag)
{
(void) tag;
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer ||
(!mmp_flag && scl->scl_write_wanted)) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (scl->scl_count != 0) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 0);
}
/*
 * spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests. This is needed since the mmp updates are
 * time-sensitive and failure to service them promptly will result in a
* suspended pool. This pool suspension has been seen in practice when there is
* a single disk in a pool that is responding slowly and presumably about to
* fail.
*/
void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 1);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
(void) tag;
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(scl->scl_count > 0);
if (--scl->scl_count == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && scl->scl_count != 0) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
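/*
 * Typical usage sketch for these lock routines (the reader pattern here
 * mirrors bp_get_dsize() below; FTAG is the conventional tag):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *vd = vdev_lookup_top(spa, vdevid);
 *	...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Writers pass RW_WRITER instead and block until all readers drain;
 * only spa_config_enter_mmp() may jump ahead of waiting writers.
 */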
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
 * Look up the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
(u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
const spa_log_sm_t *b = vb;
return (TREE_CMP(a->sls_txg, b->sls_txg));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_trust_config = B_TRUE;
spa->spa_hostid = zone_get_hostid(NULL);
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
spa->spa_alloc_count = spa_allocators;
spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
sizeof (spa_alloc_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
NULL);
avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
}
avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
spa->spa_gcd_alloc = INT_MAX;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
list_create(&spa->spa_leaf_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_leaf_node));
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
avl_destroy(&spa->spa_allocs[i].spaa_tree);
mutex_destroy(&spa->spa_allocs[i].spaa_lock);
}
kmem_free(spa->spa_allocs, spa->spa_alloc_count *
sizeof (spa_alloc_t));
avl_destroy(&spa->spa_metaslabs_by_flushed);
avl_destroy(&spa->spa_sm_logs_by_txg);
list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
zfs_refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, const void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid;
uint64_t aux_pool;
avl_node_t aux_avl;
int aux_count;
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
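/*
 * Lifecycle sketch for a hypothetical spare vdev 'vd' (illustrative only;
 * the real callers live in the vdev configuration paths):
 *
 *	spa_spare_add(vd);		track vd, vdev_isspare = B_TRUE
 *	spa_spare_activate(vd);		vd now replaces a failed device
 *	(void) spa_spare_exists(vd->vdev_guid, &pool, &refcnt);
 *	spa_spare_remove(vd);		drop the reference; freed at zero
 */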
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently support only one pool per device, and so for
 * these devices the aux reference count never exceeds 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
return (spa_vdev_config_enter(spa));
}
/*
* The same as spa_vdev_enter() above but additionally takes the guid of
* the vdev being detached. When there is a rebuild in process it will be
* suspended while the vdev tree is modified then resumed by spa_vdev_exit().
* The rebuild is canceled if only a single child remains after the detach.
*/
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
if (guid != 0) {
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd) {
vdev_rebuild_stop_wait(vd->vdev_top);
}
}
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (e.g. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
const char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
}
/*
* The vdev may be both a leaf and top-level device.
*/
vdev_autotrim_stop_wait(vd);
spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_STATE_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
vdev_autotrim_restart(spa);
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
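/*
 * Usage sketch (hypothetical caller; the real callers are the vdev
 * add/remove paths in spa.c):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */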
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
 * Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(8) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because lock SCL_CONFIG is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
 * Determine whether a pool with the given pool_guid exists. If device_guid
 * is non-zero, the pool must also contain a device with that guid.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
memcpy(new, s, len + 1);
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid;
if (spa != NULL) {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
} else {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(guid, 0));
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
const char *checksum = NULL;
const char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
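/*
 * For example, zfs_strtonum("1a2b", NULL) returns 0x1a2b (6699), and
 * parsing stops at the first character outside [0-9a-f].
 */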
void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
/*
* We bump the feature refcount for each special vdev added to the pool
*/
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
return (spa->spa_indirect_vdevs_loaded);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strlcpy(buf, spa->spa_root, buflen);
}
uint32_t
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
 * will be used just to clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on this
 * pool, we use that block size instead, since the write will end up using the
 * whole block anyway.
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
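/*
 * Numeric sketch: with spa_max_ashift = 12 (4 KiB allocations) and
 * assuming the default spa_asize_inflation of 24, a 1 KiB logical
 * write is costed as MAX(1024, 4096) * 24 = 96 KiB.
 */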
/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
* the value of spa_max_slop. The embedded log space is not included in
* spa_dspace. By subtracting it, the usable space (per "zfs list") is a
* constant 97% of the total space, regardless of metaslab size (assuming the
* default spa_slop_shift=5 and a non-tiny pool).
*
* See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = 0;
uint64_t slop = 0;
/*
* Make sure spa_dedup_dspace has been set.
*/
if (spa->spa_dedup_dspace == ~0ULL)
spa_update_dspace(spa);
/*
* spa_get_dspace() includes the space only logically "used" by
* deduplicated data, so since it's not useful to reserve more
* space with more deduplicated data, we subtract that out here.
*/
- space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
+ space =
+ spa_get_dspace(spa) - spa->spa_dedup_dspace - brt_get_dspace(spa);
slop = MIN(space >> spa_slop_shift, spa_max_slop);
/*
* Subtract the embedded log space, but no more than half the (3.2%)
* unusable space. Note, the "no more than half" is only relevant if
* zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
* default.
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);
/*
* Slop space should be at least spa_min_slop, but no more than half
* the entire pool.
*/
slop = MAX(slop, MIN(space >> 1, spa_min_slop));
return (slop);
}
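/*
 * Numeric sketch of the above: 10 TiB of dspace >> spa_slop_shift (5)
 * gives 320 GiB, clamped to spa_max_slop (128 GiB); a 1 GiB embedded
 * log class then trims that to 127 GiB, still well above spa_min_slop.
 */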
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
return (spa->spa_checkpoint_info.sci_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa) + brt_get_dspace(spa);
if (spa->spa_nonallocating_dspace > 0) {
/*
* Subtract the space provided by all non-allocating vdevs that
* contribute to dspace. If a file is overwritten, its old
* blocks are freed and new blocks are allocated. If there are
* no snapshots of the file, the available space should remain
* the same. The old blocks could be freed from the
* non-allocating vdev, but the new blocks must be allocated on
* other (allocating) vdevs. By reserving the entire size of
* the non-allocating vdevs (including allocated space), we
* ensure that there will be enough space on the allocating
* vdevs for this file overwrite to succeed.
*
* Note that the DMU/DSL doesn't actually know or care
* how much space is allocated (it does its own tracking
* of how much space has been logically used). So it
* doesn't matter that the data we are moving may be
* allocated twice (on the old device and the new device).
*/
ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
spa->spa_dspace -= spa->spa_nonallocating_dspace;
}
}
/*
 * Return the failure mode that has been set for this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
return (spa->spa_embedded_log_class);
}
metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
}
metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
return (spa->spa_dedup_class);
}
/*
* Locate an appropriate allocation class
*/
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
/*
* ZIL allocations determine their class in zio_alloc_zil().
*/
ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
if (DMU_OT_IS_DDT(objtype)) {
if (spa->spa_dedup_class->mc_groups != 0)
return (spa_dedup_class(spa));
else if (has_special_class && zfs_ddt_data_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/* Indirect blocks for user data can land in special if allowed */
if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
if (has_special_class && zfs_user_indirect_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
if (DMU_OT_IS_METADATA(objtype) || level > 0) {
if (has_special_class)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/*
* Allow small file blocks in special class in some cases (like
* for the dRAID vdev feature). But always leave a reserve of
* zfs_special_class_metadata_reserve_pct exclusively for metadata.
*/
if (DMU_OT_IS_FILE(objtype) &&
has_special_class && size <= special_smallblk) {
metaslab_class_t *special = spa_special_class(spa);
uint64_t alloc = metaslab_class_get_alloc(special);
uint64_t space = metaslab_class_get_space(special);
uint64_t limit =
(space * (100 - zfs_special_class_metadata_reserve_pct))
/ 100;
if (alloc < limit)
return (special);
}
return (spa_normal_class(spa));
}
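/*
 * Reserve math sketch: with the default
 * zfs_special_class_metadata_reserve_pct of 25 and a 100 GiB special
 * class, small file blocks are admitted only while allocations are
 * under 75 GiB; the last 25 GiB stays reserved for metadata.
 */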
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
return (spa->spa_autotrim);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
void
spa_set_deadman_ziotime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
}
}
void
spa_set_deadman_synctime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
}
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
uint64_t
spa_dirty_data(spa_t *spa)
{
return (spa->spa_dsl_pool->dp_dirty_total);
}
/*
* ==========================================================================
* SPA Import Progress Routines
* ==========================================================================
*/
typedef struct spa_import_progress {
uint64_t pool_guid; /* unique id for updates */
char *pool_name;
spa_load_state_t spa_load_state;
uint64_t mmp_sec_remaining; /* MMP activity check */
uint64_t spa_load_max_txg; /* rewind txg */
procfs_list_node_t smh_node;
} spa_import_progress_t;
spa_history_list_t *spa_import_progress_list = NULL;
static int
spa_import_progress_show_header(struct seq_file *f)
{
seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
"load_state", "multihost_secs", "max_txg",
"pool_name");
return (0);
}
static int
spa_import_progress_show(struct seq_file *f, void *data)
{
spa_import_progress_t *sip = (spa_import_progress_t *)data;
seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
(u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
(u_longlong_t)sip->mmp_sec_remaining,
(u_longlong_t)sip->spa_load_max_txg,
(sip->pool_name ? sip->pool_name : "-"));
return (0);
}
/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
spa_import_progress_t *sip;
while (shl->size > size) {
sip = list_remove_head(&shl->procfs_list.pl_list);
if (sip->pool_name)
spa_strfree(sip->pool_name);
kmem_free(sip, sizeof (spa_import_progress_t));
shl->size--;
}
IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}
static void
spa_import_progress_init(void)
{
spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
KM_SLEEP);
spa_import_progress_list->size = 0;
spa_import_progress_list->procfs_list.pl_private =
spa_import_progress_list;
procfs_list_install("zfs",
NULL,
"import_progress",
0644,
&spa_import_progress_list->procfs_list,
spa_import_progress_show,
spa_import_progress_show_header,
NULL,
offsetof(spa_import_progress_t, smh_node));
}
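/*
 * On Linux the list typically surfaces as
 * /proc/spl/kstat/zfs/import_progress; illustrative (made-up) output:
 *
 *	pool_guid            load_state     multihost_secs max_txg      pool_name
 *	1234567890123456789  3              0              0            tank
 */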
static void
spa_import_progress_destroy(void)
{
spa_history_list_t *shl = spa_import_progress_list;
procfs_list_uninstall(&shl->procfs_list);
spa_import_progress_truncate(shl, 0);
procfs_list_destroy(&shl->procfs_list);
kmem_free(shl, sizeof (spa_history_list_t));
}
int
spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t load_state)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_state = load_state;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_max_txg = load_max_txg;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->mmp_sec_remaining = mmp_sec_remaining;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
/*
* A new import is in progress, add an entry.
*/
void
spa_import_progress_add(spa_t *spa)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
const char *poolname = NULL;
sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
sip->pool_guid = spa_guid(spa);
(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
&poolname);
if (poolname == NULL)
poolname = spa_name(spa);
sip->pool_name = spa_strdup(poolname);
sip->spa_load_state = spa_load_state(spa);
mutex_enter(&shl->procfs_list.pl_lock);
procfs_list_add(&shl->procfs_list, sip);
shl->size++;
mutex_exit(&shl->procfs_list.pl_lock);
}
void
spa_import_progress_remove(uint64_t pool_guid)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
if (sip->pool_name)
spa_strfree(sip->pool_name);
list_remove(&shl->procfs_list.pl_list, sip);
shl->size--;
kmem_free(sip, sizeof (spa_import_progress_t));
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (TREE_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
zfs_refcount_init();
unique_init();
zfs_btree_init();
metaslab_stat_init();
brt_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
chksum_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
qat_init();
spa_import_progress_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
chksum_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
brt_fini();
metaslab_stat_fini();
zfs_btree_fini();
unique_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();
spa_import_progress_destroy();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_groups != 0);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
!txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
spa_mode_t
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_errorscrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
/* error scrub stats */
spa->spa_scan_pass_errorscrub_spent_paused = 0;
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
return (SET_ERROR(ENOENT));
memset(ps, 0, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_skipped = scn->scn_phys.scn_skipped;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
/* error scrub data stored on disk */
ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
ps->pss_error_scrub_to_be_examined =
scn->errorscrub_phys.dep_to_examine;
/* error scrub data not stored on disk */
ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;
return (0);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
/*
* Returns the txg that the last device removal completed. No indirect mappings
* have been added since this txg.
*/
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
uint64_t vdevid;
uint64_t ret = -1ULL;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
* sr_prev_indirect_vdev is only modified while holding all the
* config locks, so it is sufficient to hold SCL_VDEV as reader when
* examining it.
*/
vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
while (vdevid != -1ULL) {
vdev_t *vd = vdev_lookup_top(spa, vdevid);
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
/*
* If the removal did not remap any data, we don't care.
*/
if (vdev_indirect_births_count(vib) != 0) {
ret = vdev_indirect_births_last_entry_txg(vib);
break;
}
vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
IMPLY(ret != -1ULL,
spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
return (ret);
}
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
uint32_t
spa_get_hostid(spa_t *spa)
{
return (spa->spa_hostid);
}
boolean_t
spa_trust_config(spa_t *spa)
{
return (spa->spa_trust_config);
}
uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
return (spa->spa_missing_tvds_allowed);
}
space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
return (spa->spa_syncing_log_sm);
}
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
spa->spa_missing_tvds = missing;
}
/*
* Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
*/
const char *
spa_state_to_name(spa_t *spa)
{
ASSERT3P(spa, !=, NULL);
/*
 * It is possible for the spa to exist without a root vdev while the
 * spa transitions during import/export.
*/
vdev_t *rvd = spa->spa_root_vdev;
if (rvd == NULL) {
return ("TRANSITIONING");
}
vdev_state_t state = rvd->vdev_state;
vdev_aux_t aux = rvd->vdev_stat.vs_aux;
if (spa_suspended(spa))
return ("SUSPENDED");
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return ("OFFLINE");
case VDEV_STATE_REMOVED:
return ("REMOVED");
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return ("FAULTED");
else if (aux == VDEV_AUX_SPLIT_POOL)
return ("SPLIT");
else
return ("UNAVAIL");
case VDEV_STATE_FAULTED:
return ("FAULTED");
case VDEV_STATE_DEGRADED:
return ("DEGRADED");
case VDEV_STATE_HEALTHY:
return ("ONLINE");
default:
break;
}
return ("UNKNOWN");
}
boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
spa_has_checkpoint(spa_t *spa)
{
return (spa->spa_checkpoint_txg != 0);
}
boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
spa->spa_mode == SPA_MODE_READ);
}
uint64_t
spa_min_claim_txg(spa_t *spa)
{
uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
if (checkpoint_txg != 0)
return (checkpoint_txg + 1);
return (spa->spa_first_txg);
}
/*
* If there is a checkpoint, async destroys may consume more space from
* the pool instead of freeing it. In an attempt to save the pool from
* getting suspended when it is about to run out of space, we stop
* processing async destroys.
*/
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t unreserved = dsl_pool_unreserved_space(dp,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
if (spa_has_checkpoint(spa) && avail == 0)
return (B_TRUE);
return (B_FALSE);
}
#if defined(_KERNEL)
int
param_set_deadman_failmode_common(const char *val)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
}
return (0);
}
#endif
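/*
 * On Linux this backs the zfs_deadman_failmode module parameter, e.g.
 * (hypothetical shell session):
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 */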
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
"Set additional debugging flags");
ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
"Set to attempt to recover from fatal errors");
ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
"Set to ignore IO errors during free and permanently leak the space");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
"Dead I/O check interval in milliseconds");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
"Enable deadman timer");
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
"SPA size estimate multiplication factor");
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
"Place DDT data into the special class");
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
"Place user data indirect blocks into the special class");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
param_set_deadman_failmode, param_get_charp, ZMOD_RW,
"Failmode for deadman timer");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
"Pool sync expiration time in milliseconds");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
"Small file blocks in special vdevs depends on this much "
"free space available");
/* END CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
param_get_uint, ZMOD_RW, "Reserved free space in pool");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index afb01c0ef7fd..e1ca1aecc900 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,6413 +1,6427 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright (c) 2021, Klara Inc.
* Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
static uint_t zfs_embedded_slog_min_ms = 64;
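/*
 * Worked example (assuming the default spa_slop_shift of 5): the
 * threshold above is 1 << (5 + 1) = 64, which matches the default
 * zfs_embedded_slog_min_ms, so one embedded slog metaslab sets aside at
 * most 1/64 of a vdev, comparable to the 1/32 slop reservation.
 */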
/* default target for number of metaslabs per top-level vdev */
static uint_t zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
static uint_t zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
static uint_t zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
static unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
static unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. This allows working around a
* resilver triggered upon import when the pool has errors.
*/
static int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
/*
* Maximum and minimum ashift values that can be automatically set based on
* vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
* is higher than this maximum, the automatic value is intentionally capped
* here so as not to excessively impact pool space efficiency. Higher
* ashift values may still be forced by the vdev logical ashift or by the
* user via the ashift property, but won't be set automatically as a
* performance optimization.
*/
uint_t zfs_vdev_max_auto_ashift = 14;
uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *const vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, *const *opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
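/*
 * For example, with a top-level vdev_ashift of 12 (4 KiB sectors), a
 * psize of 5000 bytes rounds up to P2ROUNDUP(5000, 4096) = 8192; the
 * result is then the maximum of that value and every child's asize.
 */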
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
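/*
 * For example, a top-level vdev with vdev_ms_shift = 34 (16 GiB
 * metaslabs) and an asize of 1029 GiB yields P2ALIGN(1029 GiB, 16 GiB) =
 * 1024 GiB, so a replacement device may be up to 5 GiB smaller and still
 * satisfy the same number of metaslabs.
 */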
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
static int
vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
uint64_t objid;
int err;
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (EINVAL);
}
err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
sizeof (uint64_t), 1, value);
if (err == ENOENT)
*value = vdev_prop_default_numeric(prop);
return (err);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
/*
* Default Thresholds for tuning ZED
*/
vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
const char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
const char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
const char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
vd->vdev_path = spa_strdup(tmp);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
vd->vdev_devid = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
vd->vdev_physpath = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&tmp) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
vd->vdev_fru = spa_strdup(tmp);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement. Ignore pool ashift for vdev
* attach case.
*/
if (alloctype != VDEV_ALLOC_ATTACH) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
&vd->vdev_ashift);
} else {
vd->vdev_attaching = B_TRUE;
}
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
if (vd->vdev_ops == &vdev_root_ops &&
(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
&vd->vdev_root_zap);
}
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
&vd->vdev_noalloc);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
const char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_autotrim_kick_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_noalloc = svd->vdev_noalloc;
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_noalloc = 0;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If pool not set for autoexpand, we need to also preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
/*
* Choose GCD for spa_gcd_alloc.
*/
static uint64_t
vdev_gcd(uint64_t a, uint64_t b)
{
while (b != 0) {
uint64_t t = b;
b = a % b;
a = t;
}
return (a);
}
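/*
 * For example, vdev_gcd(8192, 12288) iterates (8192, 12288) ->
 * (12288, 8192) -> (8192, 4096) -> (4096, 0) and returns 4096, the
 * largest allocation size that evenly divides both inputs.
 */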
/*
* Set spa_min_alloc and spa_gcd_alloc.
*/
static void
vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
{
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
if (spa->spa_gcd_alloc == INT_MAX) {
spa->spa_gcd_alloc = min_alloc;
} else {
spa->spa_gcd_alloc = vdev_gcd(min_alloc,
spa->spa_gcd_alloc);
}
}
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create was delayed until allocation bias was available
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
vdev_spa_set_alloc(spa, min_alloc);
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is marked as non-allocating then don't
* activate the metaslabs since we want to ensure that
* no allocations are performed on this device.
*/
if (vd->vdev_noalloc) {
/* track non-allocating vdev space */
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
} else if (!expanding) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (i.e. when destroying a pool) so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
(void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
* otherwise it would inconsistently account for existing bp's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
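/*
 * Worked example: on a plain disk vdev_psize_to_asize(vd, 1 << 17)
 * returns 128 KiB, so the ratio is 131072 / 256 = 512. On a 5-wide
 * raidz1 with ashift 12 the same block allocates roughly 160 KiB with
 * parity, giving 131072 / 320 = 409 (about 80% of nominal capacity).
 */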
/*
* Choose the best of two ashifts, preferring one between logical ashift
* (absolute minimum) and administrator defined maximum, otherwise take
* the biggest of the two.
*/
uint64_t
vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
{
if (a > logical && a <= zfs_vdev_max_auto_ashift) {
if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (a);
else
return (MAX(a, b));
} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (MAX(a, b));
return (b);
}
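/*
 * Worked example (assuming the default zfs_vdev_max_auto_ashift of 14):
 * for a device with logical ashift 9 and candidates a = 12, b = 16,
 * a falls within (9, 14] while b exceeds the cap, so
 * vdev_best_ashift(9, 12, 16) returns 12.
 */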
/*
* Maximize performance by inflating the configured ashift for top level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift &&
vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
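/*
 * Worked example: a 512e drive reporting logical ashift 9 and physical
 * ashift 12 starts with vdev_ashift = 9; since 9 < 12 <= 14 (the default
 * auto maximum), the first branch computes MIN(MAX(14, 9), MAX(9, 12)) =
 * MIN(14, 12) = 12, bumping the vdev to 4 KiB alignment.
 */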
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/* Keep the device in removed state if unplugged */
if (error == ENOENT && vd->vdev_removed) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
VDEV_AUX_NONE);
return (error);
}
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3} during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time,
* then set it to the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE)
vdev_ashift_optimize(vd);
vd->vdev_attaching = B_FALSE;
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy, we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled,
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize (e.g. esize) are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
vdev_spa_set_alloc(spa, min_alloc);
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If the config lock is not held, do not check the txg. spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non-top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
-vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
-{
- char *old, *new;
- if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
- if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
- zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
- "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
- dvd->vdev_path, svd->vdev_path);
- spa_strfree(dvd->vdev_path);
- dvd->vdev_path = spa_strdup(svd->vdev_path);
+vdev_update_path(const char *prefix, char *svd, char **dvd, uint64_t guid)
+{
+ if (svd != NULL && *dvd != NULL) {
+ if (strcmp(svd, *dvd) != 0) {
+ zfs_dbgmsg("vdev_copy_path: vdev %llu: %s changed "
+ "from '%s' to '%s'", (u_longlong_t)guid, prefix,
+ *dvd, svd);
+ spa_strfree(*dvd);
+ *dvd = spa_strdup(svd);
}
- } else if (svd->vdev_path != NULL) {
- dvd->vdev_path = spa_strdup(svd->vdev_path);
+ } else if (svd != NULL) {
+ *dvd = spa_strdup(svd);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
- (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
+ (u_longlong_t)guid, *dvd);
}
+}
+
+static void
+vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
+{
+ char *old, *new;
+
+ vdev_update_path("vdev_path", svd->vdev_path, &dvd->vdev_path,
+ dvd->vdev_guid);
+
+ vdev_update_path("vdev_devid", svd->vdev_devid, &dvd->vdev_devid,
+ dvd->vdev_guid);
+
+ vdev_update_path("vdev_physpath", svd->vdev_physpath,
+ &dvd->vdev_physpath, dvd->vdev_guid);
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
* Recursively copy vdev paths from one vdev to another. The source and
* destination vdev trees must have the same geometry; otherwise an error
* is returned. Intended to copy paths from the userland config into the
* MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching a mirror, etc.), it cannot
* step outside of it.
*/
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search the source for a vdev with the same guid and top vdev id.
* Intended to copy paths from the userland config into the MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened, as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* In case the vdev is present, we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Recheck if resilver is still needed and cancel any
* scheduled resilver if resilver is unneeded.
*/
if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
mutex_exit(&spa->spa_async_lock);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated, making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
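/*
 * Worked example, assuming the default tunables described above
 * (zfs_vdev_default_ms_shift = 29, zfs_vdev_default_ms_count = 200,
 * zfs_vdev_min_ms_count = 16): a 128 GiB vdev (asize = 2^37) gives
 * ms_count = 2^37 >> 29 = 256 > 200, so ms_shift is recomputed as
 * highbit64(2^37 / 200) = 30, i.e. 1 GiB metaslabs and an actual
 * count of 2^37 >> 30 = 128. Because ms_shift is rounded to a power
 * of two, realized counts land within a factor of two of the target.
 */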
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in more than 'maxfaults' children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
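/*
 * As an example of this rule: on a raidz1 vdev (maxfaults == 1) a txg
 * missing from only one child stays out of the parent's DTL_MISSING,
 * since parity still covers it, but enters the parent's map once two
 * or more children miss it. On a three-way mirror (maxfaults == 2) a
 * txg must be missing from all three children before the parent
 * considers it missing.
 */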
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem, but it can result in devices being tried
* which are known not to have the data, in which case the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
(void) dva, (void) psize;
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error then all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg])
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
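/*
 * For example, with DTL_MISSING covering txgs [100, 200),
 * scrub_txg = 150 and DTL_SCRUB covering [120, 130): DTL_MISSING
 * contributes +1 over [100, 200), the excision segment -1 over
 * [0, 150) and DTL_SCRUB +2 over [120, 130). The net refcnts are
 * 0 on [100, 120) and [130, 150), 2 on [120, 130) and 1 on
 * [150, 200), so generating with minref 1 keeps [120, 130) (the
 * unrepaired scrubbed txgs) and [150, 200) (the unscrubbed tail)
 * in the new DTL_MISSING.
 */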
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
/*
* Iterate over all the vdevs, except spares, and post kobj events
*/
void
vdev_post_kobj_evt(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_kobj_evt_post &&
vd->vdev_kobj_flag == B_FALSE) {
vd->vdev_kobj_flag = B_TRUE;
vd->vdev_ops->vdev_op_kobj_evt_post(vd);
}
for (int c = 0; c < vd->vdev_children; c++)
vdev_post_kobj_evt(vd->vdev_child[c]);
}
/*
* Iterate over all the vdevs, except spares, and clear kobj events
*/
void
vdev_clear_kobj_evt(vdev_t *vd)
{
vd->vdev_kobj_flag = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear_kobj_evt(vd->vdev_child[c]);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
/*
* If the dtl cannot be sync'd there is no need to open it.
*/
if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
return (0);
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success, sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
uint64_t failfast;
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
1, &failfast);
if (error == 0) {
vd->vdev_failfast = failfast & 1;
} else if (error == ENOENT) {
vd->vdev_failfast = vdev_prop_default_numeric(
VDEV_PROP_FAILFAST);
} else {
vdev_dbgmsg(vd,
"vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
uint64_t zapobj;
if (vd->vdev_top_zap != 0)
zapobj = vd->vdev_top_zap;
else
zapobj = vd->vdev_leaf_zap;
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
&vd->vdev_checksum_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
&vd->vdev_checksum_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
&vd->vdev_io_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
&vd->vdev_io_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively, we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f', then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_remove_wanted(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
/*
* If the vdev is already removed, or is expanding (which can trigger
* repartition add/remove events), then don't do anything.
*/
if (vd->vdev_removed || vd->vdev_expanding)
return (spa_vdev_state_exit(spa, NULL, 0));
/*
* Confirm the vdev has been removed, otherwise don't do anything.
*/
if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_REMOVE);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa->spa_ccw_fail_time = 0;
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED)) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
/*
* Asynchronously detach spare vdev if resilver or
* rebuild is not required
*/
if (vd->vdev_unspare &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
!vdev_rebuild_active(tvd))
spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
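/*
 * Hedged usage sketch (hypothetical helper, not part of this change):
 * online a leaf by guid with expansion requested, roughly what the
 * set-state ioctl path arranges, failing if the device did not come
 * back at least DEGRADED.
 */
#if 0
static int
example_online_and_expand(spa_t *spa, uint64_t guid)
{
	vdev_state_t newstate;
	int error;

	error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
	if (error == 0 && newstate < VDEV_STATE_DEGRADED)
		error = SET_ERROR(EIO);	/* example policy, not upstream's */
	return (error);
}
#endif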
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since we may have just added this vdev
* but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect or removed vdev.
*/
if (!vdev_is_concrete(vd) || vd->vdev_removed)
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
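/*
 * Hedged sketch: clearing error counts pool-wide, approximately what
 * the 'zpool clear' ioctl path arranges. Passing vd == NULL starts at
 * the root vdev; SCL_STATE_ALL must be held as writer, per the ASSERT
 * above. The helper name is hypothetical.
 */
#if 0
static void
example_clear_all(spa_t *spa)
{
	spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER);
	vdev_clear(spa, NULL);
	spa_config_exit(spa, SCL_STATE_ALL, FTAG);
}
#endif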
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
(void) cvd;
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
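/*
 * Worked numbers for the limit above (illustrative only): with the
 * single-word format and SM_OFFSET_BITS == 47, a vdev with ashift=9
 * can address up to 1ULL << (9 + 47) = 2^56 bytes (64 PiB), and
 * ashift=12 raises that to 2^59 bytes (512 PiB).
 */
#if 0
uint64_t max_addr_ashift9 = 1ULL << (9 + SM_OFFSET_BITS);	/* 64 PiB */
uint64_t max_addr_ashift12 = 1ULL << (12 + SM_OFFSET_BITS);	/* 512 PiB */
#endif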
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t];
vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_pspace = vd->vdev_psize;
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
if (vd->vdev_physical_ashift <= ASHIFT_MAX)
vs->vs_physical_ashift = vd->vdev_physical_ashift;
else
vs->vs_physical_ashift = 0;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
vs->vs_noalloc = MAX(vd->vdev_noalloc,
tvd ? tvd->vdev_noalloc : 0);
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
/* Suppress ASAN false positive */
#ifdef __SANITIZE_ADDRESS__
vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
#else
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
#endif
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the I/O
* (restated in the condensed sketch after this function).
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM,
* ZIO_PRIORITY_REBUILD.
*/
if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O, so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
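/*
 * Condensed restatement (a sketch, not called anywhere) of the iostat
 * priority folding performed in vdev_stat_update() above: INITIALIZING
 * and REMOVAL I/Os are reported under the async queues, since user
 * space only knows the seven priorities listed in the comment there.
 */
#if 0
static zio_priority_t
example_iostat_priority(zio_type_t type, zio_priority_t priority)
{
	if (priority == ZIO_PRIORITY_INITIALIZING)
		return (ZIO_PRIORITY_ASYNC_WRITE);
	if (priority == ZIO_PRIORITY_REMOVAL) {
		return (type == ZIO_TYPE_WRITE ?
		    ZIO_PRIORITY_ASYNC_WRITE : ZIO_PRIORITY_ASYNC_READ);
	}
	return (priority);
}
#endif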
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
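/*
 * Hedged worked example, assuming vdev_set_deflate_ratio() computes
 * vdev_deflate_ratio = (1 << 17) /
 * (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT). For a plain
 * disk (asize == psize) the ratio is 512, making vdev_deflated_space()
 * an identity; a 2x raid-z expansion yields a ratio of 256, i.e. half
 * the raw space.
 */
#if 0
uint64_t ratio = (1ULL << 17) / ((1ULL << 17) >> SPA_MINBLOCKSHIFT); /* 512 */
int64_t space = 1 << 20;				/* 1 MiB raw */
int64_t dspace = (space >> SPA_MINBLOCKSHIFT) * ratio;	/* == space */
#endif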
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
(void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
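/*
 * Hedged usage sketch (hypothetical caller): after mutating a
 * top-level vdev's on-disk configuration, dirty it under SCL_CONFIG so
 * the next spa_sync() writes the new config out, per the locking rule
 * described above.
 */
#if 0
static void
example_dirty_config(spa_t *spa, vdev_t *tvd)
{
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
	vdev_config_dirty(tvd);
	spa_config_exit(spa, SCL_CONFIG, FTAG);
}
#endif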
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since the vdev_offline() code path is already in an offline
* state, we can miss a state-change event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also leave the state
* as-is if it is one of our special test online cases, which are only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
VERIFY3U(pvd->vdev_children, >, 1);
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
ASSERT3P(pvd->vdev_child, !=, NULL);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, const char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (vq->vq_active > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %u active IOs",
vd->vdev_path, vq->vq_active);
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* the spa_deadman_synctime, invoke the deadman logic.
*/
fio = list_head(&vq->vq_active_list);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Only when there is a non-
* zero physical size call the provided function.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
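/*
 * Hedged usage sketch for the translation walk above: log every
 * physical extent backing a logical range on a leaf vdev. Both helper
 * names are hypothetical; the callback shape matches vdev_xlate_func_t.
 */
#if 0
static void
example_print_extent(void *arg, range_seg64_t *physical_rs)
{
	(void) arg;
	zfs_dbgmsg("physical extent [%llu, %llu)",
	    (u_longlong_t)physical_rs->rs_start,
	    (u_longlong_t)physical_rs->rs_end);
}

static void
example_walk(vdev_t *vd, uint64_t start, uint64_t size)
{
	range_seg64_t logical = {
		.rs_start = start,
		.rs_end = start + size,
	};

	vdev_xlate_walk(vd, &logical, example_print_extent, NULL);
}
#endif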
static char *
vdev_name(vdev_t *vd, char *buf, int buflen)
{
if (vd->vdev_path == NULL) {
if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
strlcpy(buf, vd->vdev_spa->spa_name, buflen);
} else if (!vd->vdev_ops->vdev_op_leaf) {
snprintf(buf, buflen, "%s-%llu",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id);
}
} else {
strlcpy(buf, vd->vdev_path, buflen);
}
return (buf);
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
uint64_t intval, zprop_source_t src)
{
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
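/*
 * Sketch of the nvlist shape built above (illustrative only): each
 * property becomes a nested nvlist carrying ZPROP_SOURCE and
 * ZPROP_VALUE, e.g.
 *
 *   "comment" -> { source = ZPROP_SRC_LOCAL, value = "my disk" }
 */
#if 0
static void
example_prop_list(void)
{
	nvlist_t *out = fnvlist_alloc();

	vdev_prop_add_list(out, "comment", "my disk", 0, ZPROP_SRC_LOCAL);
	fnvlist_free(out);
}
#endif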
static void
vdev_props_set_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd;
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
uint64_t objid;
nvlist_t *nvprops;
vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
/* this vdev could get removed while waiting for this sync task */
if (vd == NULL)
return;
/*
* Set vdev property values in the vdev props mos object.
*/
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
panic("unexpected vdev type");
}
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
uint64_t intval;
const char *strval;
vdev_prop_t prop;
const char *propname = nvpair_name(elem);
zprop_type_t proptype;
switch (prop = vdev_name_to_prop(propname)) {
case VDEV_PROP_USERPROP:
if (vdev_prop_user(propname)) {
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos, objid, propname,
tx);
} else {
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
}
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
}
break;
default:
/* normalize the property name */
propname = vdev_prop_to_name(prop);
proptype = vdev_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(vdev_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos, objid, propname,
sizeof (uint64_t), 1, &intval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%lld",
(u_longlong_t)vdev_guid,
nvpair_name(elem), (longlong_t)intval);
} else {
panic("invalid vdev property type %u",
nvpair_type(elem));
}
}
}
mutex_exit(&spa->spa_props_lock);
}
int
vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
int error = 0;
ASSERT(vd != NULL);
/* Check that vdev has a zap we can use */
if (vd->vdev_root_zap == 0 &&
vd->vdev_top_zap == 0 &&
vd->vdev_leaf_zap == 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
&nvprops) != 0)
return (SET_ERROR(EINVAL));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
return (SET_ERROR(EINVAL));
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
const char *propname = nvpair_name(elem);
vdev_prop_t prop = vdev_name_to_prop(propname);
uint64_t intval = 0;
const char *strval = NULL;
if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
error = EINVAL;
goto end;
}
if (vdev_prop_readonly(prop)) {
error = EROFS;
goto end;
}
/* Special Processing */
switch (prop) {
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL) {
error = EROFS;
break;
}
if (nvpair_value_string(elem, &strval) != 0) {
error = EINVAL;
break;
}
/* New path must start with /dev/ */
if (strncmp(strval, "/dev/", 5)) {
error = EINVAL;
break;
}
error = spa_vdev_setpath(spa, vdev_guid, strval);
break;
case VDEV_PROP_ALLOCATING:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
if (intval != vd->vdev_noalloc)
break;
if (intval == 0)
error = spa_vdev_noalloc(spa, vdev_guid);
else
error = spa_vdev_alloc(spa, vdev_guid);
break;
case VDEV_PROP_FAILFAST:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_failfast = intval & 1;
break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_n = intval;
break;
case VDEV_PROP_CHECKSUM_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_t = intval;
break;
case VDEV_PROP_IO_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_n = intval;
break;
case VDEV_PROP_IO_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_t = intval;
break;
default:
/* Most processing is done in vdev_props_set_sync */
break;
}
end:
if (error != 0) {
intval = error;
vdev_prop_add_list(outnvl, propname, strval, intval, 0);
return (error);
}
}
return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
int
vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int err = 0;
uint64_t objid;
uint64_t vdev_guid;
nvpair_t *elem = NULL;
nvlist_t *nvprops = NULL;
uint64_t intval = 0;
char *strval = NULL;
const char *propname = NULL;
vdev_prop_t prop;
ASSERT(vd != NULL);
ASSERT(mos != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (SET_ERROR(EINVAL));
}
ASSERT(objid != 0);
mutex_enter(&spa->spa_props_lock);
if (nvprops != NULL) {
char namebuf[64] = { 0 };
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
intval = 0;
strval = NULL;
propname = nvpair_name(elem);
prop = vdev_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_DEFAULT;
uint64_t integer_size, num_integers;
switch (prop) {
/* Special Read-only Properties */
case VDEV_PROP_NAME:
strval = vdev_name(vd, namebuf,
sizeof (namebuf));
if (strval == NULL)
continue;
vdev_prop_add_list(outnvl, propname, strval, 0,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CAPACITY:
/* percent used */
intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
(vd->vdev_stat.vs_alloc * 100 /
vd->vdev_stat.vs_dspace);
vdev_prop_add_list(outnvl, propname, NULL,
intval, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_STATE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_state, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_GUID:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_guid, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_asize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PSIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_psize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASHIFT:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_ashift, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace -
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ALLOCATED:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_EXPANDSZ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRAGMENTATION:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_fragmentation,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARITY:
vdev_prop_add_list(outnvl, propname, NULL,
vdev_get_nparity(vd), ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_DEVID:
if (vd->vdev_devid == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_devid, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PHYS_PATH:
if (vd->vdev_physpath == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_physpath, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ENC_PATH:
if (vd->vdev_enc_sysfs_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRU:
if (vd->vdev_fru == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_fru, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARENT:
if (vd->vdev_parent != NULL) {
strval = vdev_name(vd->vdev_parent,
namebuf, sizeof (namebuf));
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_CHILDREN:
if (vd->vdev_children > 0)
strval = kmem_zalloc(ZAP_MAXVALUELEN,
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
if (vname == NULL)
vname = "(unknown)";
if (strlen(strval) > 0)
strlcat(strval, ",",
ZAP_MAXVALUELEN);
strlcat(strval, vname, ZAP_MAXVALUELEN);
}
if (strval != NULL) {
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
kmem_free(strval, ZAP_MAXVALUELEN);
}
continue;
case VDEV_PROP_NUMCHILDREN:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_children, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_READ_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_read_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_WRITE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_write_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CHECKSUM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_checksum_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_INITIALIZE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_initialize_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_REMOVING:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
/* Leaf vdevs cannot have this property */
if (vd->vdev_mg == NULL &&
vd->vdev_top != NULL) {
src = ZPROP_SRC_NONE;
intval = ZPROP_BOOLEAN_NA;
} else {
err = vdev_prop_get_int(vd, prop,
&intval);
if (err && err != ENOENT)
break;
if (intval ==
vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
}
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
case VDEV_PROP_FAILFAST:
src = ZPROP_SRC_LOCAL;
strval = NULL;
err = zap_lookup(mos, objid, nvpair_name(elem),
sizeof (uint64_t), 1, &intval);
if (err == ENOENT) {
intval = vdev_prop_default_numeric(
prop);
err = 0;
} else if (err) {
break;
}
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
err = vdev_prop_get_int(vd, prop, &intval);
if (err && err != ENOENT)
break;
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
/* Text Properties */
case VDEV_PROP_COMMENT:
/* Exists in the ZAP below */
/* FALLTHRU */
case VDEV_PROP_USERPROP:
/* User Properties */
src = ZPROP_SRC_LOCAL;
err = zap_length(mos, objid, nvpair_name(elem),
&integer_size, &num_integers);
if (err)
break;
switch (integer_size) {
case 8:
/* User properties cannot be integers */
err = EINVAL;
break;
case 1:
/* string property */
strval = kmem_alloc(num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid,
nvpair_name(elem), 1,
num_integers, strval);
if (err) {
kmem_free(strval,
num_integers);
break;
}
vdev_prop_add_list(outnvl, propname,
strval, 0, src);
kmem_free(strval, num_integers);
break;
}
break;
default:
err = ENOENT;
break;
}
if (err)
break;
}
} else {
/*
* Get all properties from the MOS vdev property object.
*/
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, objid);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
intval = 0;
strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
propname = za.za_name;
switch (za.za_integer_length) {
case 8:
/*
* We do not allow integer user properties;
* this is likely an internal value.
*/
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid, za.za_name, 1,
za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
vdev_prop_add_list(outnvl, propname, strval, 0,
src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
}
mutex_exit(&spa->spa_props_lock);
if (err && err != ENOENT) {
return (err);
}
return (0);
}
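/*
 * Hedged sketch of the nvlist contract for vdev_prop_get(): request a
 * single named property for a vdev by guid. The helper is hypothetical
 * and error handling is elided; the ZPOOL_VDEV_PROPS_GET_* keys are the
 * ones checked above.
 */
#if 0
static int
example_get_state_prop(vdev_t *vd, uint64_t guid, nvlist_t *outnvl)
{
	nvlist_t *innvl = fnvlist_alloc();
	nvlist_t *props = fnvlist_alloc();
	int err;

	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV, guid);
	/* Only the nvpair name matters here; the value is ignored. */
	fnvlist_add_uint64(props, vdev_prop_to_name(VDEV_PROP_STATE), 0);
	fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, props);
	err = vdev_prop_get(vd, innvl, outnvl);
	fnvlist_free(props);
	fnvlist_free(innvl);
	return (err);
}
#endif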
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
"Default lower limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
"Default upper limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below ZED threshold).");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index a2e5524a8391..737d8b33e188 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -1,2041 +1,2080 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* Virtual Device Labels
* ---------------------
*
* The vdev label serves several distinct purposes:
*
* 1. Uniquely identify this device as part of a ZFS pool and confirm its
* identity within the pool.
*
* 2. Verify that all the devices given in a configuration are present
* within the pool.
*
* 3. Determine the uberblock for the pool.
*
* 4. In case of an import operation, determine the configuration of the
* toplevel vdev of which it is a part.
*
* 5. If an import operation cannot find all the devices in the pool,
* provide enough information to the administrator to determine which
* devices are missing.
*
* It is important to note that while the kernel is responsible for writing the
* label, it only consumes the information in the first three cases. The
* latter information is only consumed in userland when determining the
* configuration to import a pool.
*
*
* Label Organization
* ------------------
*
* Before describing the contents of the label, it's important to understand how
* the labels are written and updated with respect to the uberblock.
*
* When the pool configuration is altered, either because it was newly created
* or a device was added, we want to update all the labels such that we can deal
* with fatal failure at any point. To this end, each disk has two labels which
* are updated before and after the uberblock is synced. Assuming we have
* labels and an uberblock with the following transaction groups:
*
* L1 UB L2
* +------+ +------+ +------+
* | | | | | |
* | t10 | | t10 | | t10 |
* | | | | | |
* +------+ +------+ +------+
*
* In this stable state, the labels and the uberblock were all updated within
* the same transaction group (10). Each label is mirrored and checksummed, so
* that we can detect when we fail partway through writing the label.
*
* In order to identify which labels are valid, the labels are written in the
* following manner:
*
* 1. For each vdev, update 'L1' to the new label
* 2. Update the uberblock
* 3. For each vdev, update 'L2' to the new label
*
* Given arbitrary failure, we can determine the correct label to use based on
* the transaction group. If we fail after updating L1 but before updating the
* UB, we will notice that L1's transaction group is greater than the uberblock,
* so L2 must be valid. If we fail after writing the uberblock but before
* writing L2, we will notice that L2's transaction group is less than L1, and
* therefore L1 is valid.
*
* Another added complexity is that not every label is updated when the config
* is synced. If we add a single device, we do not want to have to re-write
* every label for every device in the pool. This means that both L1 and L2 may
* be older than the pool uberblock, because the necessary information is stored
* on another vdev.
*
*
* On-disk Format
* --------------
*
* The vdev label consists of two distinct parts, and is wrapped within the
* vdev_label_t structure. The label includes 8k of padding to permit legacy
* VTOC disk labels; this padding is otherwise ignored.
*
* The first half of the label is a packed nvlist which contains pool wide
* properties, per-vdev properties, and configuration information. It is
* described in more detail below.
*
* The latter half of the label consists of a redundant array of uberblocks.
* These uberblocks are updated whenever a transaction group is committed,
* or when the configuration is updated. When a pool is loaded, we scan each
* vdev for the 'best' uberblock.
*
*
* Configuration Information
* -------------------------
*
* The nvlist describing the pool and vdev contains the following elements:
*
* version ZFS on-disk version
* name Pool name
* state Pool state
* txg Transaction group in which this label was written
* pool_guid Unique identifier for this pool
* vdev_tree An nvlist describing vdev tree.
* features_for_read
* An nvlist of the features necessary for reading the MOS.
*
* Each leaf device label also contains the following:
*
* top_guid Unique ID for top-level vdev in which this is contained
* guid Unique ID for the leaf vdev
*
* The 'vs' configuration follows the format described in 'spa_config.c'.
*/
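As a hedged aside (not part of this patch), the label-selection rule described in the comment above reduces to a few lines of plain C: a label whose txg exceeds the uberblock's txg was written by an update that failed before the uberblock landed, so it is ignored; among the remaining labels the newest wins. pick_valid_label() is an invented helper, not an OpenZFS function.

#include <stdint.h>
#include <stdio.h>

static int
pick_valid_label(uint64_t l1_txg, uint64_t l2_txg, uint64_t ub_txg)
{
	int l1_ok = (l1_txg <= ub_txg);	/* L1 not from a failed update */
	int l2_ok = (l2_txg <= ub_txg);	/* L2 not from a failed update */

	if (l1_ok && l2_ok)
		return (l1_txg >= l2_txg ? 1 : 2);	/* newest valid label */
	/* The update protocol guarantees at least one label is valid. */
	return (l1_ok ? 1 : 2);
}

int
main(void)
{
	/* Crash between the L1 update and the uberblock write: L2 wins. */
	printf("valid label: L%d\n", pick_valid_label(11, 10, 10));
	return (0);
}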
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/byteorder.h>
#include <sys/zfs_bootenv.h>
/*
* Basic routines to read and write from a vdev label.
* Used throughout the rest of this file.
*/
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
/*
* Returns the vdev label number associated with the given offset.
*/
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
int l;
if (offset >= psize - VDEV_LABEL_END_SIZE) {
offset -= psize - VDEV_LABEL_END_SIZE;
offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
}
l = offset / sizeof (vdev_label_t);
return (l < VDEV_LABELS ? l : -1);
}
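The offset math in vdev_label_offset() and vdev_label_number() above can be checked with a minimal standalone sketch, with the on-disk constants hard-coded as assumptions (four labels of 256 KiB each, two at the front of the device and two at the end):

#include <stdint.h>
#include <stdio.h>

#define	LBL_SIZE	(256ULL << 10)	/* assumed sizeof (vdev_label_t) */
#define	NLABELS		4		/* assumed VDEV_LABELS */

static uint64_t
label_offset(uint64_t psize, int l)
{
	/* Labels 0,1 sit at the front; labels 2,3 at the device's end. */
	return ((uint64_t)l * LBL_SIZE +
	    (l < NLABELS / 2 ? 0 : psize - NLABELS * LBL_SIZE));
}

int
main(void)
{
	uint64_t psize = 1ULL << 30;	/* e.g. a 1 GiB device */

	for (int l = 0; l < NLABELS; l++)
		printf("L%d @ %ju\n", l, (uintmax_t)label_offset(psize, l));
	/* Prints 0, 256K, psize-512K, psize-256K. */
	return (0);
}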
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_read_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_write_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
/*
* Generate the nvlist representing this vdev's stats
*/
void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
nvlist_t *nvx;
vdev_stat_t *vs;
vdev_stat_ex_t *vsx;
vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
vdev_get_stats_ex(vd, vs, vsx);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
/*
* Add extended stats into a special extended stats nvlist. This keeps
* all the extended stats nicely grouped together. The extended stats
* nvlist is then added to the main nvlist.
*/
nvx = fnvlist_alloc();
/* ZIOs in flight to disk */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);
/* ZIOs pending */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);
/* Histograms */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));
/* Request sizes */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));
/* IO delays */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
/* Add extended stats nvlist to main nvlist */
fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
fnvlist_free(nvx);
kmem_free(vs, sizeof (*vs));
kmem_free(vsx, sizeof (*vsx));
}
static void
root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
spa_t *spa = vd->vdev_spa;
if (vd != spa->spa_root_vdev)
return;
/* provide either current or previous scan information */
pool_scan_stat_t ps;
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
pool_removal_stat_t prs;
if (spa_removal_get_stats(spa, &prs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
sizeof (prs) / sizeof (uint64_t));
}
pool_checkpoint_stat_t pcs;
if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
sizeof (pcs) / sizeof (uint64_t));
}
}
static void
top_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
if (vd == vd->vdev_top) {
vdev_rebuild_stat_t vrs;
if (vdev_rebuild_get_stats(vd, &vrs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs,
sizeof (vrs) / sizeof (uint64_t));
}
}
}
/*
* Generate the nvlist representing this vdev's config.
*/
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
vdev_config_flag_t flags)
{
nvlist_t *nv = NULL;
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
nv = fnvlist_alloc();
fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
if (vd->vdev_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
if (vd->vdev_devid != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
if (vd->vdev_physpath != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
if (vd->vdev_fru != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
if (vd->vdev_ops->vdev_op_config_generate != NULL)
vd->vdev_ops->vdev_op_config_generate(vd, nv);
if (vd->vdev_wholedisk != -1ULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
}
if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
if (flags & VDEV_CONFIG_L2CACHE)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
vd == vd->vdev_top) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
vd->vdev_ms_array);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
if (vd->vdev_noalloc) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
vd->vdev_noalloc);
}
/*
* Slog devices are removed synchronously so don't
* persist the vdev_removing flag to the label.
*/
if (vd->vdev_removing && !vd->vdev_islog) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
}
/* zpool command expects alloc class data */
if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
const char *bias = NULL;
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
bias = VDEV_ALLOC_BIAS_LOG;
break;
case VDEV_BIAS_SPECIAL:
bias = VDEV_ALLOC_BIAS_SPECIAL;
break;
case VDEV_BIAS_DEDUP:
bias = VDEV_ALLOC_BIAS_DEDUP;
break;
default:
ASSERT3U(vd->vdev_alloc_bias, ==,
VDEV_BIAS_NONE);
}
fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
bias);
}
}
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
if (vic->vic_mapping_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
vic->vic_mapping_object);
}
if (vic->vic_births_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
vic->vic_births_object);
}
if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
vic->vic_prev_indirect_vdev);
}
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (vd->vdev_expansion_time)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
vd->vdev_expansion_time);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
vd->vdev_leaf_zap);
}
if (vd->vdev_top_zap != 0) {
ASSERT(vd == vd->vdev_top);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
vd->vdev_top_zap);
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap != 0 &&
spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
vd->vdev_root_zap);
}
if (vd->vdev_resilver_deferred) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(spa->spa_resilver_deferred);
fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
}
}
if (getstats) {
vdev_config_generate_stats(vd, nv);
root_vdev_actions_getprogress(vd, nv);
top_vdev_actions_getprogress(vd, nv);
/*
* Note: this can be called from open context
* (spa_get_stats()), so we need the rwlock to prevent
* the mapping from being changed by condensing.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
if (vd->vdev_indirect_mapping != NULL) {
ASSERT(vd->vdev_indirect_births != NULL);
vdev_indirect_mapping_t *vim =
vd->vdev_indirect_mapping;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
vdev_indirect_mapping_size(vim));
}
rw_exit(&vd->vdev_indirect_rwlock);
if (vd->vdev_mg != NULL &&
vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
/*
* Compute approximately how much memory would be used
* for the indirect mapping if this device were to
* be removed.
*
* Note: If the frag metric is invalid, then not
* enough metaslabs have been converted to have
* histograms.
*/
uint64_t seg_count = 0;
uint64_t to_alloc = vd->vdev_stat.vs_alloc;
/*
* There are the same number of allocated segments
* as free segments, so we will have at least one
* entry per free segment. However, small free
* segments (smaller than vdev_removal_max_span)
* will be combined with adjacent allocated segments
* as a single mapping.
*/
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (i + 1 < highbit64(vdev_removal_max_span)
- 1) {
to_alloc +=
vd->vdev_mg->mg_histogram[i] <<
(i + 1);
} else {
seg_count +=
vd->vdev_mg->mg_histogram[i];
}
}
/*
* The maximum length of a mapping is
* zfs_remove_max_segment, so we need at least one entry
* per zfs_remove_max_segment of allocated data.
*/
seg_count += to_alloc / spa_remove_max_segment(spa);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
seg_count *
sizeof (vdev_indirect_mapping_entry_phys_t));
}
}
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
uint64_t c;
ASSERT(!vd->vdev_ishole);
child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
for (c = 0; c < vd->vdev_children; c++) {
child[c] = vdev_config_generate(spa, vd->vdev_child[c],
getstats, flags);
}
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, vd->vdev_children);
for (c = 0; c < vd->vdev_children; c++)
nvlist_free(child[c]);
kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
} else {
const char *aux = NULL;
if (vd->vdev_offline && !vd->vdev_tmpoffline)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
if (vd->vdev_resilver_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
vd->vdev_resilver_txg);
if (vd->vdev_rebuild_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
vd->vdev_rebuild_txg);
if (vd->vdev_faulted)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
if (vd->vdev_degraded)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
if (vd->vdev_removed)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
if (vd->vdev_unspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
if (vd->vdev_ishole)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
/* Set the reason why we're FAULTED/DEGRADED. */
switch (vd->vdev_stat.vs_aux) {
case VDEV_AUX_ERR_EXCEEDED:
aux = "err_exceeded";
break;
case VDEV_AUX_EXTERNAL:
aux = "external";
break;
}
if (aux != NULL && !vd->vdev_tmpoffline) {
fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
} else {
/*
* We're healthy - clear any previous AUX_STATE values.
*/
if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
}
if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
vd->vdev_orig_guid);
}
}
return (nv);
}
/*
* Generate a view of the top-level vdevs. If we currently have holes
* in the namespace, then generate an array which contains a list of holey
* vdevs. Additionally, add the number of top-level children that currently
* exist.
*/
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *array;
uint_t c, idx;
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_ishole) {
array[idx++] = c;
}
}
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
rvd->vdev_children) == 0);
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
/*
* Returns the configuration from the label of the given vdev. For vdevs
* which don't have a txg value stored on their label (i.e. spares/cache)
* or have not been completely initialized (txg = 0), just return
* the configuration from the first valid label we find. Otherwise,
* find the most up-to-date label that does not exceed the specified
* 'txg' value.
*/
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *config = NULL;
vdev_phys_t *vp[VDEV_LABELS];
abd_t *vp_abd[VDEV_LABELS];
zio_t *zio[VDEV_LABELS];
uint64_t best_txg = 0;
uint64_t label_txg = 0;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(vd->vdev_validate_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (!vdev_readable(vd))
return (NULL);
/*
* The label for a dRAID distributed spare is not stored on disk.
* Instead it is generated when needed, which allows us to bypass
* the pipeline when reading the config from the label.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_read_config_spare(vd));
for (int l = 0; l < VDEV_LABELS; l++) {
vp_abd[l] = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
vp[l] = abd_to_buf(vp_abd[l]);
}
retry:
for (int l = 0; l < VDEV_LABELS; l++) {
zio[l] = zio_root(spa, NULL, NULL, flags);
vdev_label_read(zio[l], vd, l, vp_abd[l],
offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
NULL, NULL, flags);
}
for (int l = 0; l < VDEV_LABELS; l++) {
nvlist_t *label = NULL;
if (zio_wait(zio[l]) == 0 &&
nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist),
&label, 0) == 0) {
/*
* Auxiliary vdevs won't have txg values in their
* labels and newly added vdevs may not have been
* completely initialized so just return the
* configuration from the first valid label we
* encounter.
*/
error = nvlist_lookup_uint64(label,
ZPOOL_CONFIG_POOL_TXG, &label_txg);
if ((error || label_txg == 0) && !config) {
config = label;
for (l++; l < VDEV_LABELS; l++)
zio_wait(zio[l]);
break;
} else if (label_txg <= txg && label_txg > best_txg) {
best_txg = label_txg;
nvlist_free(config);
config = fnvlist_dup(label);
}
}
if (label != NULL) {
nvlist_free(label);
label = NULL;
}
}
if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
/*
* We found a valid label but it didn't pass txg restrictions.
*/
if (config == NULL && label_txg != 0) {
vdev_dbgmsg(vd, "label discarded as txg is too large "
"(%llu > %llu)", (u_longlong_t)label_txg,
(u_longlong_t)txg);
}
for (int l = 0; l < VDEV_LABELS; l++) {
abd_free(vp_abd[l]);
}
return (config);
}
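The retry: label above implements a pattern that recurs throughout this file: attempt the I/O with normal flags first, then escalate once to ZIO_FLAG_TRYHARD before giving up. A hedged standalone sketch of just that control flow (do_io() and the flag value are invented for illustration):

#include <stdio.h>

#define	FLAG_TRYHARD	0x1	/* stand-in for ZIO_FLAG_TRYHARD */

static int
do_io(int flags)
{
	/* Pretend the read only succeeds on the try-hard pass. */
	return ((flags & FLAG_TRYHARD) ? 0 : -1);
}

int
main(void)
{
	int flags = 0;
retry:
	if (do_io(flags) != 0 && !(flags & FLAG_TRYHARD)) {
		flags |= FLAG_TRYHARD;
		goto retry;
	}
	printf("done (flags=%#x)\n", flags);
	return (0);
}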
/*
* Determine if a device is in use. The 'spare_guid' parameter will be filled
* in with the device guid if this spare is active elsewhere on the system.
*/
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
uint64_t *spare_guid, uint64_t *l2cache_guid)
{
spa_t *spa = vd->vdev_spa;
uint64_t state, pool_guid, device_guid, txg, spare_pool;
uint64_t vdtxg = 0;
nvlist_t *label;
if (spare_guid)
*spare_guid = 0ULL;
if (l2cache_guid)
*l2cache_guid = 0ULL;
/*
* Read the label, if any, and perform some basic sanity checks.
*/
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
return (B_FALSE);
(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
&vdtxg);
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
&device_guid) != 0) {
nvlist_free(label);
return (B_FALSE);
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0)) {
nvlist_free(label);
return (B_FALSE);
}
nvlist_free(label);
/*
* Check to see if this device indeed belongs to the pool it claims to
* be a part of. The only way this is allowed is if the device is a hot
* spare (which we check for later on).
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
!spa_guid_exists(pool_guid, device_guid) &&
!spa_spare_exists(device_guid, NULL, NULL) &&
!spa_l2cache_exists(device_guid, NULL))
return (B_FALSE);
/*
* If the transaction group is zero, then this is an initialized (but
* unused) label. This is only an error if the create transaction
* on-disk is the same as the one we're using now, in which case the
* user has attempted to add the same vdev multiple times in the same
* transaction.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
txg == 0 && vdtxg == crtxg)
return (B_TRUE);
/*
* Check to see if this is a spare device. We do an explicit check for
* spa_has_spare() here because it may be on our pending list of spares
* to add.
*/
if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
spa_has_spare(spa, device_guid)) {
if (spare_guid)
*spare_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_spare(spa, device_guid) ||
spare_pool != 0ULL);
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
default:
break;
}
}
/*
* Check to see if this is an l2cache device.
*/
if (spa_l2cache_exists(device_guid, NULL) ||
spa_has_l2cache(spa, device_guid)) {
if (l2cache_guid)
*l2cache_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_l2cache(spa, device_guid));
case VDEV_LABEL_L2CACHE:
return (spa_has_l2cache(spa, device_guid));
default:
break;
}
}
/*
* We can't rely on a pool's state if it's been imported
* read-only. Instead we look to see if the pool is marked
* read-only in the namespace and set the state to active.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
spa_mode(spa) == SPA_MODE_READ)
state = POOL_STATE_ACTIVE;
/*
* If the device is marked ACTIVE, then it is in use by another pool
* on the system.
*/
return (state == POOL_STATE_ACTIVE);
}
/*
* Initialize a vdev label. We check to make sure each leaf device is not in
* use and is writable. We put down an initial label which we will later
* overwrite with a complete label. Note that it's important to do this
* sequentially, not in parallel, so that we catch cases of multiple use of the
* same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
* itself.
*/
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
abd_t *bootenv;
uberblock_t *ub;
abd_t *ub_abd;
zio_t *zio;
char *buf;
size_t buflen;
int error;
uint64_t spare_guid = 0, l2cache_guid = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
+ boolean_t reason_spare = (reason == VDEV_LABEL_SPARE || (reason ==
+ VDEV_LABEL_REMOVE && vd->vdev_isspare));
+ boolean_t reason_l2cache = (reason == VDEV_LABEL_L2CACHE || (reason ==
+ VDEV_LABEL_REMOVE && vd->vdev_isl2cache));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (int c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0)
return (error);
/* Track the creation time for this vdev */
vd->vdev_crtxg = crtxg;
if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
return (0);
/*
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
* that is in use elsewhere on the system, then we must update the
* guid (which was initialized to a random value) to reflect the
* actual GUID (which is shared between multiple pools).
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
spare_guid != 0ULL) {
uint64_t guid_delta = spare_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding a spare, then it's already
* labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_SPARE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE ||
reason == VDEV_LABEL_SPLIT);
}
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
l2cache_guid != 0ULL) {
uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding an l2cache, then it's
* already labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_L2CACHE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE);
}
/*
* Initialize its label.
*/
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
/*
* Generate a label describing the pool and our top-level vdev.
* We mark it as being from txg 0 to indicate that it's not
* really part of an active pool just yet. The labels will
* be written again with a meaningful txg by spa_sync().
*/
- if (reason == VDEV_LABEL_SPARE ||
- (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
+ if (reason_spare || reason_l2cache) {
/*
- * For inactive hot spares, we generate a special label that
- * identifies as a mutually shared hot spare. We write the
- * label if we are adding a hot spare, or if we are removing an
- * active hot spare (in which case we want to revert the
- * labels).
+ * For inactive hot spares and level 2 ARC devices, we generate
+ * a special label that identifies as a mutually shared hot
+ * spare or l2cache device. We write the label in case of
+ * addition or removal of hot spare or l2cache vdev (in which
+ * case we want to revert the labels).
*/
VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
- POOL_STATE_SPARE) == 0);
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
- vd->vdev_guid) == 0);
- } else if (reason == VDEV_LABEL_L2CACHE ||
- (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
- /*
- * For level 2 ARC devices, add a special label.
- */
- VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
-
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
- spa_version(spa)) == 0);
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
- POOL_STATE_L2CACHE) == 0);
+ reason_spare ? POOL_STATE_SPARE : POOL_STATE_L2CACHE) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
/*
* This is merely to facilitate reporting the ashift of the
* cache device through zdb. The actual retrieval of the
* ashift (in vdev_alloc()) uses the nvlist
* spa->spa_l2cache->sav_config (populated in
* spa_ld_open_aux_vdevs()).
*/
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_ASHIFT,
- vd->vdev_ashift) == 0);
+ if (reason_l2cache) {
+ VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_ASHIFT,
+ vd->vdev_ashift) == 0);
+ }
+
+ /*
+ * Add path information to help find it during pool import
+ */
+ if (vd->vdev_path != NULL) {
+ VERIFY(nvlist_add_string(label, ZPOOL_CONFIG_PATH,
+ vd->vdev_path) == 0);
+ }
+ if (vd->vdev_devid != NULL) {
+ VERIFY(nvlist_add_string(label, ZPOOL_CONFIG_DEVID,
+ vd->vdev_devid) == 0);
+ }
+ if (vd->vdev_physpath != NULL) {
+ VERIFY(nvlist_add_string(label, ZPOOL_CONFIG_PHYS_PATH,
+ vd->vdev_physpath) == 0);
+ }
+
+ /*
+ * When a spare or l2cache (aux) vdev is added during pool
+ * creation, spa->spa_uberblock has not yet been written at
+ * this point. Write it on the next config sync.
+ */
+ if (uberblock_verify(&spa->spa_uberblock))
+ spa->spa_aux_sync_uber = B_TRUE;
} else {
uint64_t txg = 0ULL;
if (reason == VDEV_LABEL_SPLIT)
txg = spa->spa_uberblock.ub_txg;
label = spa_config_generate(spa, vd, txg, B_FALSE);
/*
* Add our creation time. This allows us to detect multiple
* vdev uses as described above, and the label automatically
* expires if we fail.
*/
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
crtxg) == 0);
}
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
if (error != 0) {
nvlist_free(label);
abd_free(vp_abd);
/* EFAULT means nvlist_pack ran out of room */
return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
}
/*
* Initialize uberblock template.
*/
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
ub = abd_to_buf(ub_abd);
ub->ub_txg = 0;
/* Initialize the 2nd padding area. */
bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(bootenv, VDEV_PAD_SIZE);
/*
* Write everything in parallel.
*/
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t), NULL, NULL, flags);
/*
* Skip the 1st padding area.
* Zero out the 2nd padding area where it might have
* left over data from previous filesystem format.
*/
vdev_label_write(zio, vd, l, bootenv,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
vdev_label_write(zio, vd, l, ub_abd,
offsetof(vdev_label_t, vl_uberblock),
VDEV_UBERBLOCK_RING, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
nvlist_free(label);
abd_free(bootenv);
abd_free(ub_abd);
abd_free(vp_abd);
/*
* If this vdev hasn't been previously identified as a spare, then we
* mark it as such only if a) we are labeling it as a spare, or b) it
* exists as a spare elsewhere in the system. Do the same for
* level 2 ARC devices.
*/
if (error == 0 && !vd->vdev_isspare &&
(reason == VDEV_LABEL_SPARE ||
spa_spare_exists(vd->vdev_guid, NULL, NULL)))
spa_spare_add(vd);
if (error == 0 && !vd->vdev_isl2cache &&
(reason == VDEV_LABEL_L2CACHE ||
spa_l2cache_exists(vd->vdev_guid, NULL)))
spa_l2cache_add(vd);
return (error);
}
/*
* Done callback for vdev_label_read_bootenv_impl. If this is the first
* callback to finish, store our abd in the callback pointer. Otherwise, we
* just free our abd and return.
*/
static void
vdev_label_read_bootenv_done(zio_t *zio)
{
zio_t *rio = zio->io_private;
abd_t **cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);
if (zio->io_error == 0) {
mutex_enter(&rio->io_lock);
if (*cbp == NULL) {
/* Will free this buffer in vdev_label_read_bootenv. */
*cbp = zio->io_abd;
} else {
abd_free(zio->io_abd);
}
mutex_exit(&rio->io_lock);
} else {
abd_free(zio->io_abd);
}
}
static void
vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);
/*
* We just use the first label that has a correct checksum; the
* bootloader should have rewritten them all to be the same on boot,
* and any changes we made since boot have been the same across all
* labels.
*/
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
vdev_label_read_bootenv_done, zio, flags);
}
}
}
int
vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
{
nvlist_t *config;
spa_t *spa = rvd->vdev_spa;
abd_t *abd = NULL;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(bootenv);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
zio_t *zio = zio_root(spa, NULL, &abd, flags);
vdev_label_read_bootenv_impl(zio, rvd, flags);
int err = zio_wait(zio);
if (abd != NULL) {
char *buf;
vdev_boot_envblock_t *vbe = abd_to_buf(abd);
vbe->vbe_version = ntohll(vbe->vbe_version);
switch (vbe->vbe_version) {
case VB_RAW:
/*
* If we have textual data in vbe_bootenv, create an nvlist
* with key "envmap".
*/
fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
fnvlist_add_string(bootenv, GRUB_ENVMAP,
vbe->vbe_bootenv);
break;
case VB_NVLIST:
err = nvlist_unpack(vbe->vbe_bootenv,
sizeof (vbe->vbe_bootenv), &config, 0);
if (err == 0) {
fnvlist_merge(bootenv, config);
nvlist_free(config);
break;
}
zfs_fallthrough;
default:
/* Check for FreeBSD zfs bootonce command string */
buf = abd_to_buf(abd);
if (*buf == '\0') {
fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
VB_NVLIST);
break;
}
fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
}
/*
* abd was allocated in vdev_label_read_bootenv_impl()
*/
abd_free(abd);
/*
* If we managed to read any successfully,
* return success.
*/
return (0);
}
return (err);
}
int
vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
{
zio_t *zio;
spa_t *spa = vd->vdev_spa;
vdev_boot_envblock_t *bootenv;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
int error;
size_t nvsize;
char *nvbuf;
const char *tmp;
error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
return (SET_ERROR(E2BIG));
}
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
error = ENXIO;
for (int c = 0; c < vd->vdev_children; c++) {
int child_err;
child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
/*
* As long as any of the disks managed to write all of their
* labels successfully, return success.
*/
if (child_err == 0)
error = child_err;
}
if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
!vdev_writeable(vd)) {
return (error);
}
ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(abd, VDEV_PAD_SIZE);
bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
nvbuf = bootenv->vbe_bootenv;
nvsize = sizeof (bootenv->vbe_bootenv);
bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
switch (bootenv->vbe_version) {
case VB_RAW:
if (nvlist_lookup_string(env, GRUB_ENVMAP, &tmp) == 0) {
(void) strlcpy(bootenv->vbe_bootenv, tmp, nvsize);
}
error = 0;
break;
case VB_NVLIST:
error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
KM_SLEEP);
break;
default:
error = EINVAL;
break;
}
if (error == 0) {
bootenv->vbe_version = htonll(bootenv->vbe_version);
abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
} else {
abd_free(abd);
return (SET_ERROR(error));
}
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, abd,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
abd_free(abd);
return (error);
}
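The vbe_bootenv handling above sizes the nvlist before packing it into the fixed pad area. The same bounded-pack pattern can be exercised from userland with libnvpair; a sketch under the assumption that the standard libnvpair API is available (link with -lnvpair; the key name and the 8 KiB buffer size are illustrative stand-ins for the bootenv pad):

#include <libnvpair.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *nvl;
	char buf[8 * 1024];	/* stands in for vbe_bootenv */
	char *bufp = buf;
	size_t size, buflen = sizeof (buf);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	(void) nvlist_add_string(nvl, "envmap", "kernelname=vmlinuz");

	/* Refuse oversized nvlists before packing, as the kernel does. */
	if (nvlist_size(nvl, &size, NV_ENCODE_XDR) != 0 || size >= buflen) {
		nvlist_free(nvl);
		return (1);	/* the E2BIG case above */
	}
	if (nvlist_pack(nvl, &bufp, &buflen, NV_ENCODE_XDR, 0) == 0)
		printf("packed %zu bytes\n", size);
	nvlist_free(nvl);
	return (0);
}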
/*
* ==========================================================================
* uberblock load/sync
* ==========================================================================
*/
/*
* Consider the following situation: txg is safely synced to disk. We've
* written the first uberblock for txg + 1, and then we lose power. When we
* come back up, we fail to see the uberblock for txg + 1 because, say,
* it was on a mirrored device and the replica to which we wrote txg + 1
* is now offline. If we then make some changes and sync txg + 1, and then
* the missing replica comes back, then for a few seconds we'll have two
* conflicting uberblocks on disk with the same txg. The solution is simple:
* among uberblocks with equal txg, choose the one with the latest timestamp.
*/
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
if (likely(cmp))
return (cmp);
/*
* If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
* ZFS, e.g. OpenZFS >= 0.7.
*
* If one ub has MMP and the other does not, they were written by
* different hosts, which matters for MMP. So we treat no MMP/no SEQ as
* a 0 value.
*
* Since timestamp and txg are the same if we get this far, either is
* acceptable for importing the pool.
*/
unsigned int seq1 = 0;
unsigned int seq2 = 0;
if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
seq1 = MMP_SEQ(ub1);
if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
seq2 = MMP_SEQ(ub2);
return (TREE_CMP(seq1, seq2));
}
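A hedged distillation of the ordering above, with a simplified stand-in struct: the real code extracts the MMP sequence from the uberblock via MMP_SEQ() and treats it as 0 when invalid, which here becomes a plain field.

#include <stdint.h>

struct ub_lite {
	uint64_t txg;
	uint64_t timestamp;
	unsigned int mmp_seq;	/* 0 when MMP/SEQ is not valid */
};

#define	CMP3(a, b)	(((a) > (b)) - ((a) < (b)))	/* like TREE_CMP */

int
ub_lite_compare(const struct ub_lite *u1, const struct ub_lite *u2)
{
	int cmp;

	if ((cmp = CMP3(u1->txg, u2->txg)) != 0)
		return (cmp);	/* higher txg wins outright */
	if ((cmp = CMP3(u1->timestamp, u2->timestamp)) != 0)
		return (cmp);	/* then the later timestamp */
	return (CMP3(u1->mmp_seq, u2->mmp_seq));	/* then the MMP seq */
}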
struct ubl_cbdata {
uberblock_t *ubl_ubbest; /* Best uberblock */
vdev_t *ubl_vd; /* vdev associated with the above */
};
static void
vdev_uberblock_load_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
spa_t *spa = zio->io_spa;
zio_t *rio = zio->io_private;
uberblock_t *ub = abd_to_buf(zio->io_abd);
struct ubl_cbdata *cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
mutex_enter(&rio->io_lock);
if (ub->ub_txg <= spa->spa_load_max_txg &&
vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
/*
* Keep track of the vdev in which this uberblock
* was found. We will use this information later
* to obtain the config nvlist associated with
* this uberblock.
*/
*cbp->ubl_ubbest = *ub;
cbp->ubl_vd = vd;
}
mutex_exit(&rio->io_lock);
}
abd_free(zio->io_abd);
}
static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
struct ubl_cbdata *cbp)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) &&
vd->vdev_ops != &vdev_draid_spare_ops) {
for (int l = 0; l < VDEV_LABELS; l++) {
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_load_done, zio, flags);
}
}
}
}
/*
* Reads the 'best' uberblock from disk along with its associated
* configuration. First, we read the uberblock array of each label of each
* vdev, keeping track of the uberblock with the highest txg in each array.
* Then, we read the configuration from the same vdev as the best uberblock.
*/
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
zio_t *zio;
spa_t *spa = rvd->vdev_spa;
struct ubl_cbdata cb;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(ub);
ASSERT(config);
memset(ub, 0, sizeof (uberblock_t));
*config = NULL;
cb.ubl_ubbest = ub;
cb.ubl_vd = NULL;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
zio = zio_root(spa, NULL, &cb, flags);
vdev_uberblock_load_impl(zio, rvd, flags, &cb);
(void) zio_wait(zio);
/*
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
if (cb.ubl_vd != NULL) {
vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
"txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
if (*config == NULL && spa->spa_extreme_rewind) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
"Trying again without txg restrictions.");
*config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
}
if (*config == NULL) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* For use when a leaf vdev is expanded.
* The location of labels 2 and 3 changed, and at the new location the
* uberblock rings are either empty or contain garbage. The sync will write
* new configs there because the vdev is dirty, but expansion also needs the
* uberblock rings copied. Read them from label 0 which did not move.
*
* Since the point is to populate labels {2,3} with valid uberblocks,
* we zero uberblocks we fail to read or which are not valid.
*/
static void
vdev_copy_uberblocks(vdev_t *vd)
{
abd_t *ub_abd;
zio_t *write_zio;
int locks = (SCL_L2ARC | SCL_ZIO);
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* No uberblocks are stored on distributed spares; they may be
* safely skipped when expanding a leaf vdev.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
const int src_label = 0;
zio_t *zio;
zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
vdev_label_read(zio, vd, src_label, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
NULL, NULL, flags);
if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
for (int l = 2; l < VDEV_LABELS; l++)
vdev_label_write(write_zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
(void) zio_wait(write_zio);
spa_config_exit(vd->vdev_spa, locks, FTAG);
abd_free(ub_abd);
}
/*
* On success, increment root zio's count of good writes.
* We only get credit for writes to known-visible vdevs; see spa_vdev_add().
*/
static void
vdev_uberblock_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
atomic_inc_64(good_writes);
}
/*
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
uberblock_t *ub, vdev_t *vd, int flags)
{
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_uberblock_sync(zio, good_writes,
ub, vd->vdev_child[c], flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* There's no need to write uberblocks to a distributed spare; they
* are already stored on all the leaves of the parent dRAID. For
* this same reason vdev_uberblock_load_impl() skips distributed
* spares when reading uberblocks.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
/* If the vdev was expanded, need to copy uberblock rings. */
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
vd->vdev_copy_uberblocks == B_TRUE) {
vdev_copy_uberblocks(vd);
vd->vdev_copy_uberblocks = B_FALSE;
}
int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
for (int l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
abd_free(ub_abd);
}
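The slot computation above, n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m), rotates uberblocks through the ring while holding back MMP_BLOCKS_PER_LABEL slots on multihost pools. A small sketch with invented parameter names and example values:

#include <stdint.h>
#include <stdio.h>

static int
uberblock_slot(uint64_t txg, int ring_count, int multihost, int mmp_slots)
{
	/* Hold back the trailing MMP slots only when multihost is on. */
	int m = multihost ? mmp_slots : 0;

	return ((int)(txg % (uint64_t)(ring_count - m)));
}

int
main(void)
{
	/* e.g. a 128-entry ring with one slot reserved for MMP */
	printf("txg 1000 -> slot %d\n", uberblock_slot(1000, 128, 1, 1));
	return (0);
}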
/* Sync the uberblocks to all vdevs in svd[] */
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;
zio_t *zio;
uint64_t good_writes = 0;
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
+ if (spa->spa_aux_sync_uber) {
+ for (int v = 0; v < spa->spa_spares.sav_count; v++) {
+ vdev_uberblock_sync(zio, &good_writes, ub,
+ spa->spa_spares.sav_vdevs[v], flags);
+ }
+ for (int v = 0; v < spa->spa_l2cache.sav_count; v++) {
+ vdev_uberblock_sync(zio, &good_writes, ub,
+ spa->spa_l2cache.sav_vdevs[v], flags);
+ }
+ }
(void) zio_wait(zio);
/*
* Flush the uberblocks to disk. This ensures that the odd labels
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++) {
if (vdev_writeable(svd[v])) {
zio_flush(zio, svd[v]);
}
}
+ if (spa->spa_aux_sync_uber) {
+ spa->spa_aux_sync_uber = B_FALSE;
+ for (int v = 0; v < spa->spa_spares.sav_count; v++) {
+ if (vdev_writeable(spa->spa_spares.sav_vdevs[v])) {
+ zio_flush(zio, spa->spa_spares.sav_vdevs[v]);
+ }
+ }
+ for (int v = 0; v < spa->spa_l2cache.sav_count; v++) {
+ if (vdev_writeable(spa->spa_l2cache.sav_vdevs[v])) {
+ zio_flush(zio, spa->spa_l2cache.sav_vdevs[v]);
+ }
+ }
+ }
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}
/*
* On success, increment the count of good writes for our top-level vdev.
*/
static void
vdev_label_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0)
atomic_inc_64(good_writes);
}
/*
* If there weren't enough good writes, indicate failure to the parent.
*/
static void
vdev_label_sync_top_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}
/*
* We ignore errors for log and cache devices and simply free the private data.
*/
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
kmem_free(zio->io_private, sizeof (uint64_t));
}
/*
* Write all even or odd labels to all leaves of the specified vdev.
*/
static void
vdev_label_sync(zio_t *zio, uint64_t *good_writes,
vdev_t *vd, int l, uint64_t txg, int flags)
{
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
char *buf;
size_t buflen;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_label_sync(zio, good_writes,
vd->vdev_child[c], l, txg, flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* The top-level config never needs to be written to a distributed
* spare. When read, vdev_draid_read_config_spare() generates the
* config returned by vdev_label_read_config().
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
/*
* Generate a label describing the top-level config to which we belong.
*/
label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
for (; l < VDEV_LABELS; l += 2) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t),
vdev_label_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
}
abd_free(vp_abd);
nvlist_free(label);
}
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
list_t *dl = &spa->spa_config_dirty_list;
vdev_t *vd;
zio_t *zio;
int error;
/*
* Write the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
uint64_t *good_writes;
ASSERT(!vd->vdev_ishole);
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
zio_t *vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags);
vdev_label_sync(vio, good_writes, vd, l, txg, flags);
zio_nowait(vio);
}
error = zio_wait(zio);
/*
* Flush the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
zio_flush(zio, vd);
(void) zio_wait(zio);
return (error);
}
/*
* Sync the uberblock and any changes to the vdev configuration.
*
* The order of operations is carefully crafted to ensure that
* if the system panics or loses power at any time, the state on disk
* is still transactionally consistent. The in-line comments below
* describe the failure semantics at each stage.
*
* Moreover, vdev_config_sync() is designed to be idempotent: if it fails
* at any time, you can just call it again, and it will resume its work.
*/
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
spa_t *spa = svd[0]->vdev_spa;
uberblock_t *ub = &spa->spa_uberblock;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
ASSERT(svdcount != 0);
retry:
/*
* Normally, we don't want to try too hard to write every label and
* uberblock. If there is a flaky disk, we don't want the rest of the
* sync process to block while we retry. But if we can't write a
* single label out, we should retry with ZIO_FLAG_TRYHARD before
* bailing out and declaring the pool faulted.
*/
if (error != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0)
return (error);
flags |= ZIO_FLAG_TRYHARD;
}
ASSERT(ub->ub_txg <= txg);
/*
* If this isn't a resync due to I/O errors,
* and nothing changed in this transaction group,
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
if (ub->ub_txg < txg) {
boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
txg, spa->spa_mmp.mmp_delay);
if (!changed && list_is_empty(&spa->spa_config_dirty_list))
return (0);
}
if (txg > spa_freeze_txg(spa))
return (0);
ASSERT(txg <= spa->spa_final_txg);
/*
* Flush the write cache of every disk that's been written to
* in this transaction group. This ensures that all blocks
* written in this txg will be committed to stable storage
* before any uberblock that references them.
*/
zio_t *zio = zio_root(spa, NULL, NULL, flags);
for (vdev_t *vd =
txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
zio_flush(zio, vd);
(void) zio_wait(zio);
/*
* Sync out the even labels (L0, L2) for every dirty vdev. If the
* system dies in the middle of this process, that's OK: all of the
* even labels that made it to disk will be newer than any uberblock,
* and will therefore be considered invalid. The odd labels (L1, L3),
* which have not yet been touched, will still be valid. We flush
* the new labels to disk to ensure that all even-label updates
* are committed to stable storage before the uberblock update.
*/
if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the even labels "
"of dirty vdevs", error, spa_name(spa));
}
goto retry;
}
/*
* Sync the uberblocks to all vdevs in svd[].
* If the system dies in the middle of this step, there are two cases
* to consider, and the on-disk state is consistent either way:
*
* (1) If none of the new uberblocks made it to disk, then the
* previous uberblock will be the newest, and the odd labels
* (which had not yet been touched) will be valid with respect
* to that uberblock.
*
* (2) If one or more new uberblocks made it to disk, then they
* will be the newest, and the even labels (which had all
* been successfully committed) will be valid with respect
* to the new uberblocks.
*/
if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
"%d for pool '%s'", error, spa_name(spa));
}
goto retry;
}
if (spa_multihost(spa))
mmp_update_uberblock(spa, ub);
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
* uberblocks will suffice to open the pool. The next time
* the pool is opened, the first thing we'll do -- before any
* user data is modified -- is mark every vdev dirty so that
* all labels will be brought up to date. We flush the new labels
* to disk to ensure that all odd-label updates are committed to
* stable storage before the next transaction group begins.
*/
if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the odd labels of "
"dirty vdevs", error, spa_name(spa));
}
goto retry;
}
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_trim.c b/sys/contrib/openzfs/module/zfs/vdev_trim.c
index 03e17db024ea..d96b75e5edf1 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_trim.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_trim.c
@@ -1,1776 +1,1777 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016 by Delphix. All rights reserved.
* Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
* Copyright (c) 2021 Hewlett Packard Enterprise Development LP
* Copyright 2023 RackTop Systems, Inc.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc_impl.h>
/*
* TRIM is a feature which is used to notify an SSD that some previously
* written space is no longer allocated by the pool. This is useful because
* writes to an SSD must be performed to blocks which have first been erased.
* Ensuring the SSD always has a supply of erased blocks for new writes
* helps prevent performance from deteriorating.
*
* There are two supported TRIM methods: manual and automatic.
*
* Manual TRIM:
*
* A manual TRIM is initiated by running the 'zpool trim' command. A single
* 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
* managing that vdev's TRIM process. This involves iterating over all the
* metaslabs, calculating the unallocated space ranges, and then issuing the
* required TRIM I/Os.
*
* While a metaslab is being actively trimmed, it is not eligible to perform
* new allocations. After traversing all of the metaslabs, the thread is
* terminated. Finally, both the requested options and current progress of
* the TRIM are regularly written to the pool. This allows the TRIM to be
* suspended and resumed as needed.
*
* Automatic TRIM:
*
* An automatic TRIM is enabled by setting the 'autotrim' pool property
* to 'on'. When enabled, a 'vdev_autotrim' thread is created for each
* top-level (not leaf) vdev in the pool. These threads perform the same
* core TRIM process as a manual TRIM, but with a few key differences.
*
* 1) Automatic TRIM happens continuously in the background and operates
* solely on recently freed blocks (ms_trim not ms_allocatable).
*
* 2) Each thread is associated with a top-level (not leaf) vdev. This has
* the benefit of simplifying the threading model, making it easier
* to coordinate administrative commands, and ensuring only a single
* metaslab is disabled at a time. Unlike manual TRIM, this means each
* 'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
* children.
*
* 3) There is no automatic TRIM progress information stored on disk, nor
* is it reported by 'zpool status'.
*
* While the automatic TRIM process is highly effective, it is more likely
* than a manual TRIM to encounter tiny ranges. Ranges smaller than
* 'zfs_trim_extent_bytes_min' (32 KiB) are considered too small to
* efficiently TRIM and are skipped. This means small amounts of freed
* space may not be automatically trimmed.
*
* Furthermore, devices with attached hot spares and devices being actively
* replaced are skipped. This is done to avoid adding additional stress to
* a potentially unhealthy device and to minimize the required rebuild time.
*
* For this reason it may be beneficial to occasionally manually TRIM a pool
* even when automatic TRIM is enabled.
*/
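/*
 * Illustrative usage (not part of this file): a manual TRIM is started
 * from userland with 'zpool trim <pool>', suspended with
 * 'zpool trim -s <pool>', and canceled with 'zpool trim -c <pool>';
 * automatic TRIM is enabled with 'zpool set autotrim=on <pool>'.
 */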
/*
* Maximum size of a TRIM I/O; larger ranges will be chunked into 128 MiB lengths.
*/
static unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;
/*
* Minimum size of a TRIM I/O; extents smaller than 32 KiB will be skipped.
*/
static unsigned int zfs_trim_extent_bytes_min = 32 * 1024;
/*
* Skip uninitialized metaslabs during the TRIM process. This option is
* useful for pools constructed from large thinly-provisioned devices where
* TRIM operations are slow. As a pool ages, an increasing fraction of
* the pool's metaslabs will be initialized, progressively degrading the
* usefulness of this option. This setting is stored when starting a
* manual TRIM and will persist for the duration of the requested TRIM.
*/
unsigned int zfs_trim_metaslab_skip = 0;
/*
* Maximum number of queued TRIM I/Os per leaf vdev. The number of
* concurrent TRIM I/Os issued to the device is controlled by the
* zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
*/
static unsigned int zfs_trim_queue_limit = 10;
/*
* The minimum number of transaction groups between automatic trims of a
* metaslab. This setting represents a trade-off between issuing more
* efficient TRIM operations, by allowing them to be aggregated longer,
* and issuing them promptly so the trimmed space is available. Note
* that this value is a minimum; metaslabs can be trimmed less frequently
* when there are a large number of ranges which need to be trimmed.
*
* Increasing this value will allow frees to be aggregated for a longer
* time. This can result in larger TRIM operations and increased memory
* usage in order to track the ranges to be trimmed. Decreasing this value
* has the opposite effect. The default value of 32 was determined through
* testing to be a reasonable compromise.
*/
static unsigned int zfs_trim_txg_batch = 32;
/*
* The trim_args structure describes how a leaf vdev should be trimmed.
* The core elements are the vdev, the metaslab being trimmed, and a
* range tree containing the extents to TRIM. All provided
* ranges must be within the metaslab.
*/
typedef struct trim_args {
/*
* These fields are set by the caller of vdev_trim_ranges().
*/
vdev_t *trim_vdev; /* Leaf vdev to TRIM */
metaslab_t *trim_msp; /* Disabled metaslab */
range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
trim_type_t trim_type; /* Manual or auto TRIM */
uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
enum trim_flag trim_flags; /* TRIM flags (secure) */
/*
* These fields are updated by vdev_trim_ranges().
*/
hrtime_t trim_start_time; /* Start time */
uint64_t trim_bytes_done; /* Bytes trimmed */
} trim_args_t;
/*
* Determines whether a vdev_trim_thread() should be stopped.
*/
static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing);
}
/*
* Determines whether a vdev_autotrim_thread() should be stopped.
*/
static boolean_t
vdev_autotrim_should_stop(vdev_t *tvd)
{
return (tvd->vdev_autotrim_exit_wanted ||
!vdev_writeable(tvd) || tvd->vdev_removing ||
spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}
/*
* Wait for the given number of kicks; return true if the wait is aborted due to
* vdev_autotrim_exit_wanted.
*/
static boolean_t
vdev_autotrim_wait_kick(vdev_t *vd, int num_of_kick)
{
mutex_enter(&vd->vdev_autotrim_lock);
for (int i = 0; i < num_of_kick; i++) {
if (vd->vdev_autotrim_exit_wanted)
break;
- cv_wait(&vd->vdev_autotrim_kick_cv, &vd->vdev_autotrim_lock);
+ cv_wait_idle(&vd->vdev_autotrim_kick_cv,
+ &vd->vdev_autotrim_lock);
}
boolean_t exit_wanted = vd->vdev_autotrim_exit_wanted;
mutex_exit(&vd->vdev_autotrim_lock);
return (exit_wanted);
}
/*
* The sync task for updating the on-disk state of a manual TRIM. This
* is scheduled by vdev_trim_change_state().
*/
static void
vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
{
/*
* We pass in the guid instead of the vdev_t since the vdev may
* have been freed prior to the sync task being processed. This
* happens when a vdev is detached as we call spa_config_vdev_exit(),
* stop the trimming thread, schedule the sync task, and free
* the vdev. Later, when the scheduled sync task is invoked, it would
* find that the vdev has been freed.
*/
uint64_t guid = *(uint64_t *)arg;
uint64_t txg = dmu_tx_get_txg(tx);
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
return;
uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
vd->vdev_trim_offset[txg & TXG_MASK] = 0;
VERIFY3U(vd->vdev_leaf_zap, !=, 0);
objset_t *mos = vd->vdev_spa->spa_meta_objset;
if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {
if (vd->vdev_trim_last_offset == UINT64_MAX)
last_offset = 0;
vd->vdev_trim_last_offset = last_offset;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (last_offset), 1, &last_offset, tx));
}
if (vd->vdev_trim_action_time > 0) {
uint64_t val = (uint64_t)vd->vdev_trim_action_time;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
1, &val, tx));
}
if (vd->vdev_trim_rate > 0) {
uint64_t rate = (uint64_t)vd->vdev_trim_rate;
if (rate == UINT64_MAX)
rate = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
}
uint64_t partial = vd->vdev_trim_partial;
if (partial == UINT64_MAX)
partial = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (partial), 1, &partial, tx));
uint64_t secure = vd->vdev_trim_secure;
if (secure == UINT64_MAX)
secure = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (secure), 1, &secure, tx));
uint64_t trim_state = vd->vdev_trim_state;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state, tx));
}
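/*
 * Note the sentinel convention used above: an in-core value of UINT64_MAX
 * means "reset to the default" and is persisted to the ZAP as 0.
 */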
/*
* Update the on-disk state of a manual TRIM. This is called to request
* that a TRIM be started/suspended/canceled, or to change one of the
* TRIM options (partial, secure, rate).
*/
static void
vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
spa_t *spa = vd->vdev_spa;
if (new_state == vd->vdev_trim_state)
return;
/*
* Copy the vd's guid; it will be freed by the sync task.
*/
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/*
* If we're suspending, then preserve the original start time.
*/
if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
vd->vdev_trim_action_time = gethrestime_sec();
}
/*
* If we're activating, then preserve the requested rate and trim
* method. Setting the last offset and rate to UINT64_MAX is used
* as a sentinel to indicate they should be reset to default values.
*/
if (new_state == VDEV_TRIM_ACTIVE) {
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
vd->vdev_trim_last_offset = UINT64_MAX;
vd->vdev_trim_rate = UINT64_MAX;
vd->vdev_trim_partial = UINT64_MAX;
vd->vdev_trim_secure = UINT64_MAX;
}
if (rate != 0)
vd->vdev_trim_rate = rate;
if (partial != 0)
vd->vdev_trim_partial = partial;
if (secure != 0)
vd->vdev_trim_secure = secure;
}
vdev_trim_state_t old_state = vd->vdev_trim_state;
boolean_t resumed = (old_state == VDEV_TRIM_SUSPENDED);
vd->vdev_trim_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
guid, tx);
switch (new_state) {
case VDEV_TRIM_ACTIVE:
spa_event_notify(spa, vd, NULL,
resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s activated", vd->vdev_path);
break;
case VDEV_TRIM_SUSPENDED:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s suspended", vd->vdev_path);
break;
case VDEV_TRIM_CANCELED:
if (old_state == VDEV_TRIM_ACTIVE ||
old_state == VDEV_TRIM_SUSPENDED) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s canceled", vd->vdev_path);
}
break;
case VDEV_TRIM_COMPLETE:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s complete", vd->vdev_path);
break;
default:
panic("invalid state %llu", (unsigned long long)new_state);
}
dmu_tx_commit(tx);
if (new_state != VDEV_TRIM_ACTIVE)
spa_notify_waiters(spa);
}
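/*
 * For reference, the transitions handled by the switch above map to the
 * following events (a summary of this function, not an exhaustive state
 * machine): ACTIVE posts ESC_ZFS_TRIM_START (or ESC_ZFS_TRIM_RESUME when
 * leaving SUSPENDED), SUSPENDED posts ESC_ZFS_TRIM_SUSPEND, CANCELED
 * posts ESC_ZFS_TRIM_CANCEL (only when leaving ACTIVE or SUSPENDED), and
 * COMPLETE posts ESC_ZFS_TRIM_FINISH.
 */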
/*
* The zio_done_func_t done callback for each manual TRIM issued. It is
* responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
* and limiting the number of in flight TRIM I/Os.
*/
static void
vdev_trim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the vdev was unavailable; roll the
* last offset back. (This works because spa_sync waits on
* spa_txg_zio before it runs sync tasks.)
*/
uint64_t *offset =
&vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
*offset = MIN(*offset, zio->io_offset);
} else {
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
1, zio->io_orig_size, 0, 0, 0, 0);
}
vd->vdev_trim_bytes_done += zio->io_orig_size;
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each automatic TRIM issued. It
* is responsible for updating the TRIM stats and limiting the number of
* in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
* never reissued on failure.
*/
static void
vdev_autotrim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each TRIM issued via
* vdev_trim_simple(). It is responsible for updating the TRIM stats and
* limiting the number of in flight TRIM I/Os. Simple TRIM I/Os are best
* effort and are never reissued on failure.
*/
static void
vdev_trim_simple_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* Returns the average TRIM rate in bytes/sec for ta->trim_vdev.
*/
static uint64_t
vdev_trim_calculate_rate(trim_args_t *ta)
{
return (ta->trim_bytes_done * 1000 /
(NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
}
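/*
 * Worked example (illustrative numbers): after trimming 64 MiB in two
 * seconds, trim_bytes_done = 67108864 and the elapsed time is 2000 ms,
 * giving 67108864 * 1000 / (2000 + 1), i.e. roughly 32 MiB/s. The "+ 1"
 * guards against division by zero and the factor of 1000 converts the
 * NSEC2MSEC() milliseconds into a bytes-per-second rate.
 */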
/*
* Issues a physical TRIM and takes care of rate limiting (bytes/sec)
* and number of concurrent TRIM I/Os.
*/
static int
vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
{
vdev_t *vd = ta->trim_vdev;
spa_t *spa = vd->vdev_spa;
void *cb;
mutex_enter(&vd->vdev_trim_io_lock);
/*
* Limit manual TRIM I/Os to the requested rate. This does not
* apply to automatic TRIM since no per-vdev rate can be specified.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
cv_timedwait_idle(&vd->vdev_trim_io_cv,
&vd->vdev_trim_io_lock, ddi_get_lbolt() +
MSEC_TO_TICK(10));
}
}
ta->trim_bytes_done += size;
/* Limit the number of in-flight TRIM I/Os */
while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] +
vd->vdev_trim_inflight[2] >= zfs_trim_queue_limit) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
vd->vdev_trim_inflight[ta->trim_type]++;
mutex_exit(&vd->vdev_trim_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL &&
vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/* This is the first write of this txg. */
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_trim_zap_update_sync, guid, tx);
}
/*
* We know the vdev_t will still be around since all consumers of
* vdev_free must stop the trimming first.
*/
if ((ta->trim_type == TRIM_TYPE_MANUAL &&
vdev_trim_should_stop(vd)) ||
(ta->trim_type == TRIM_TYPE_AUTO &&
vdev_autotrim_should_stop(vd->vdev_top))) {
mutex_enter(&vd->vdev_trim_io_lock);
vd->vdev_trim_inflight[ta->trim_type]--;
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_trim_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL)
vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
if (ta->trim_type == TRIM_TYPE_MANUAL) {
cb = vdev_trim_cb;
} else if (ta->trim_type == TRIM_TYPE_AUTO) {
cb = vdev_autotrim_cb;
} else {
cb = vdev_trim_simple_cb;
}
zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
start, size, cb, NULL, ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL,
ta->trim_flags));
/* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */
dmu_tx_commit(tx);
return (0);
}
/*
* Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
* Additional parameters describing how the TRIM should be performed must
* be set in the trim_args structure. See the trim_args definition for
* additional information.
*/
static int
vdev_trim_ranges(trim_args_t *ta)
{
vdev_t *vd = ta->trim_vdev;
zfs_btree_t *t = &ta->trim_tree->rt_root;
zfs_btree_index_t idx;
uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
spa_t *spa = vd->vdev_spa;
int error = 0;
ta->trim_start_time = gethrtime();
ta->trim_bytes_done = 0;
for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) {
uint64_t size = rs_get_end(rs, ta->trim_tree) - rs_get_start(rs,
ta->trim_tree);
if (extent_bytes_min && size < extent_bytes_min) {
spa_iostats_trim_add(spa, ta->trim_type,
0, 0, 1, size, 0, 0);
continue;
}
/* Split range into legally-sized physical chunks */
uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;
for (uint64_t w = 0; w < writes_required; w++) {
error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
rs_get_start(rs, ta->trim_tree) +
(w * extent_bytes_max), MIN(size -
(w * extent_bytes_max), extent_bytes_max));
if (error != 0) {
goto done;
}
}
}
done:
/*
* Make sure all TRIMs for this metaslab have completed before
* returning. TRIM zios have lower priority than regular or syncing
* zios, so all TRIM zios for this metaslab must complete before the
* metaslab is re-enabled. Otherwise it's possible that write zios to
* this metaslab could cut ahead of still-queued TRIM zios for this
* metaslab, causing corruption if the ranges overlap.
*/
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[0] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
return (error);
}
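/*
 * A minimal sketch (hypothetical helper, not part of the original file)
 * of the chunking math used above: a range of 'size' bytes is split into
 * ((size - 1) / max) + 1 pieces of at most 'max' bytes, with the final
 * piece carrying the remainder.
 */
static inline uint64_t
example_trim_chunk_size(uint64_t size, uint64_t max, uint64_t w)
{
	/* Size of the w-th chunk: a full 'max' except possibly the last. */
	return (MIN(size - (w * max), max));
}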
static void
vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
if (physical_rs->rs_end > *last_rs_end)
*last_rs_end = physical_rs->rs_end;
}
static void
vdev_trim_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
vd->vdev_trim_bytes_est += size;
if (vd->vdev_trim_last_offset >= physical_rs->rs_end) {
vd->vdev_trim_bytes_done += size;
} else if (vd->vdev_trim_last_offset > physical_rs->rs_start &&
vd->vdev_trim_last_offset <= physical_rs->rs_end) {
vd->vdev_trim_bytes_done +=
vd->vdev_trim_last_offset - physical_rs->rs_start;
}
}
/*
* Calculates the completion percentage of a manual TRIM.
*/
static void
vdev_trim_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
vd->vdev_trim_bytes_est = 0;
vd->vdev_trim_bytes_done = 0;
for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
mutex_enter(&msp->ms_lock);
uint64_t ms_free = (msp->ms_size -
metaslab_allocated_space(msp)) /
vdev_get_ndisks(vd->vdev_top);
/*
* Convert the metaslab range to a physical range
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
/* Metaslab space after this offset has not been trimmed. */
vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/* Metaslab space before this offset has been trimmed. */
uint64_t last_rs_end = physical_rs.rs_end;
if (!vdev_xlate_is_empty(&remain_rs)) {
vdev_xlate_walk(vd, &remain_rs,
vdev_trim_xlate_last_rs_end, &last_rs_end);
}
if (vd->vdev_trim_last_offset > last_rs_end) {
vd->vdev_trim_bytes_done += ms_free;
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If we get here, we're in the middle of trimming this
* metaslab. Load it and walk the free tree for more
* accurate progress estimation.
*/
VERIFY0(metaslab_load(msp));
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t idx;
for (range_seg_t *rs = zfs_btree_first(bt, &idx);
rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) {
logical_rs.rs_start = rs_get_start(rs, rt);
logical_rs.rs_end = rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs,
vdev_trim_xlate_progress, vd);
}
mutex_exit(&msp->ms_lock);
}
}
/*
* Load the vdev's manual TRIM information from disk. This includes the
* state, progress, and options provided when initiating the manual TRIM.
*/
static int
vdev_trim_load(vdev_t *vd)
{
int err = 0;
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (vd->vdev_trim_last_offset), 1,
&vd->vdev_trim_last_offset);
if (err == ENOENT) {
vd->vdev_trim_last_offset = 0;
err = 0;
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
sizeof (vd->vdev_trim_rate), 1,
&vd->vdev_trim_rate);
if (err == ENOENT) {
vd->vdev_trim_rate = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (vd->vdev_trim_partial), 1,
&vd->vdev_trim_partial);
if (err == ENOENT) {
vd->vdev_trim_partial = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (vd->vdev_trim_secure), 1,
&vd->vdev_trim_secure);
if (err == ENOENT) {
vd->vdev_trim_secure = 0;
err = 0;
}
}
}
vdev_trim_calculate_progress(vd);
return (err);
}
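/*
 * The lookups above share a "read the value, or fall back to a default
 * when the entry is missing" idiom. A hypothetical wrapper (illustrative
 * only, not part of the original file) makes the pattern explicit:
 */
static inline int
example_zap_lookup_default(objset_t *os, uint64_t obj, const char *name,
    uint64_t *valp, uint64_t defval)
{
	int err = zap_lookup(os, obj, name, sizeof (uint64_t), 1, valp);
	if (err == ENOENT) {
		/* Missing entry: substitute the default value. */
		*valp = defval;
		err = 0;
	}
	return (err);
}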
static void
vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
/*
* Only a manual TRIM traverses the vdev sequentially;
* for an auto TRIM all valid ranges should be added.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
/* Only add segments that we have not visited yet */
if (physical_rs->rs_end <= vd->vdev_trim_last_offset)
return;
/* Pick up where we left off mid-range. */
if (vd->vdev_trim_last_offset > physical_rs->rs_start) {
ASSERT3U(physical_rs->rs_end, >,
vd->vdev_trim_last_offset);
physical_rs->rs_start = vd->vdev_trim_last_offset;
}
}
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
range_tree_add(ta->trim_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start);
}
/*
* Convert the logical range into physical ranges and add them to the
* range tree passed in the trim_args_t.
*/
static void
vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
/*
* Every range to be trimmed must be part of ms_allocatable.
* When ZFS_DEBUG_TRIM is set, load the metaslab to verify this
* is always the case.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
metaslab_t *msp = ta->trim_msp;
VERIFY0(metaslab_load(msp));
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
}
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_xlate_walk(vd, &logical_rs, vdev_trim_xlate_range_add, arg);
}
/*
* Each manual TRIM thread is responsible for trimming the unallocated
* space of a single leaf vdev. This is accomplished by sequentially iterating
* over its top-level metaslabs and issuing TRIM I/O for the space described
* by its ms_allocatable. While a metaslab is undergoing trimming it is
* not eligible for new allocations.
*/
static __attribute__((noreturn)) void
vdev_trim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
trim_args_t ta;
int error = 0;
/*
* The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
* vdev_trim(). Wait for the updated values to be reflected
* in the zap in order to start with the requested settings.
*/
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
VERIFY0(vdev_trim_load(vd));
ta.trim_vdev = vd;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_flags = 0;
/*
* When a secure TRIM has been requested, infer that the intent
* is that everything must be trimmed. Override the default
* minimum TRIM size to prevent ranges from being skipped.
*/
if (vd->vdev_trim_secure) {
ta.trim_flags |= ZIO_TRIM_SECURE;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
}
uint64_t ms_count = 0;
for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
/*
* If we've expanded the top-level vdev or it's our
* first pass, calculate our progress.
*/
if (vd->vdev_top->vdev_ms_count != ms_count) {
vdev_trim_calculate_progress(vd);
ms_count = vd->vdev_top->vdev_ms_count;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
/*
* If a partial TRIM was requested, skip metaslabs which have
* never been initialized and thus have never been written.
*/
if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_calculate_progress(vd);
continue;
}
ta.trim_msp = msp;
range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
error = vdev_trim_ranges(&ta);
metaslab_enable(msp, B_TRUE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(ta.trim_tree, NULL, NULL);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted) {
if (vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
} else if (vd->vdev_faulted) {
vdev_trim_change_state(vd, VDEV_TRIM_CANCELED,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
}
ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and must
* check to see if it needs to restart a trim. That thread will be
* holding the spa_config_lock which would prevent the txg_wait_synced
* from completing.
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&vd->vdev_trim_lock);
vd->vdev_trim_thread = NULL;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock;
* the vdev_t must be a leaf and cannot already be manually trimming.
*/
void
vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
* Wait for the trimming thread to be terminated (canceled or stopped).
*/
static void
vdev_trim_stop_wait_impl(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
while (vd->vdev_trim_thread != NULL)
cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
vd->vdev_trim_exit_wanted = B_FALSE;
}
/*
* Wait for the listed vdev trim threads to cleanly exit.
*/
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
(void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
while ((vd = list_remove_head(vd_list)) != NULL) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop_wait_impl(vd);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* Stop trimming a device, with the resultant trimming state being tgt_state.
* For blocking behavior, pass NULL for vd_list. Otherwise, when a list_t is
* provided, the stopping vdev is inserted into the list. Callers are then
* required to call vdev_trim_stop_wait() to block for all the trim threads
* to exit. The caller must hold vdev_trim_lock and must not be writing to
* the spa config, as the trimming thread may try to enter the config as a
* reader before exiting.
*/
void
vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
{
ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
/*
* Allow cancel requests to proceed even if the trim thread has
* stopped.
*/
if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
return;
vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
vd->vdev_trim_exit_wanted = B_TRUE;
if (vd_list == NULL) {
vdev_trim_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_insert_tail(vd_list, vd);
}
}
/*
* Requests that all listed vdevs stop trimming.
*/
static void
vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
list_t *vd_list)
{
if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, tgt_state, vd_list);
mutex_exit(&vd->vdev_trim_lock);
return;
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
vd_list);
}
}
/*
* Convenience function to stop trimming of a vdev tree and set all trim
* thread pointers to NULL.
*/
void
vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
{
spa_t *spa = vd->vdev_spa;
list_t vd_list;
vdev_t *vd_l2cache;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);
/*
* Iterate over cache devices and request stop trimming the
* whole device in case we export the pool or remove the cache
* device prematurely.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd_l2cache = spa->spa_l2cache.sav_vdevs[i];
vdev_trim_stop_all_impl(vd_l2cache, tgt_state, &vd_list);
}
vdev_trim_stop_wait(spa, &vd_list);
if (vd->vdev_spa->spa_sync_on) {
/* Make sure that our state has been synced to disk */
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
}
list_destroy(&vd_list);
}
/*
* Conditionally restarts a manual TRIM given its on-disk state.
*/
void
vdev_trim_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_leaf_zap != 0) {
mutex_enter(&vd->vdev_trim_lock);
uint64_t trim_state = VDEV_TRIM_NONE;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_state = trim_state;
uint64_t timestamp = 0;
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
sizeof (timestamp), 1, &timestamp);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_action_time = timestamp;
if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
vd->vdev_offline) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_trim_load(vd));
} else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
vd->vdev_trim_thread == NULL) {
VERIFY0(vdev_trim_load(vd));
vdev_trim(vd, vd->vdev_trim_rate,
vd->vdev_trim_partial, vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_restart(vd->vdev_child[i]);
}
}
/*
* Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
* every TRIM range is contained within ms_allocatable.
*/
static void
vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
metaslab_t *msp = ta->trim_msp;
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY3U(msp->ms_disabled, >, 0);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
}
/*
* Each automatic TRIM thread is responsible for managing the trimming of a
* top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
*
* N.B. This behavior is different from a manual TRIM where a thread
* is created for each leaf vdev, instead of each top-level vdev.
*/
static __attribute__((noreturn)) void
vdev_autotrim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
int shift = 0;
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
mutex_exit(&vd->vdev_autotrim_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
while (!vdev_autotrim_should_stop(vd)) {
int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;
/*
* All of the metaslabs are divided into groups of size
* num_metaslabs / zfs_trim_txg_batch. Each of these groups
* is composed of metaslabs which are spread evenly over the
* device.
*
* For example, when zfs_trim_txg_batch = 32 (default) then
* group 0 will contain metaslabs 0, 32, 64, ...;
* group 1 will contain metaslabs 1, 33, 65, ...;
* group 2 will contain metaslabs 2, 34, 66, ...; and so on.
*
* On each pass through the while() loop one of these groups
* is selected. This is accomplished by using a shift value
* to select the starting metaslab, then striding over the
* metaslabs using the zfs_trim_txg_batch size. This is
* done to accomplish two things.
*
* 1) By dividing the metaslabs into groups and making sure
* that each group takes a minimum of one txg to process,
* zfs_trim_txg_batch controls the minimum number of
* txgs which must occur before a metaslab is revisited.
*
* 2) Selecting non-consecutive metaslabs distributes the
* TRIM commands for a group evenly over the entire device.
* This can be advantageous for certain types of devices.
*/
for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
i += txgs_per_trim) {
metaslab_t *msp = vd->vdev_ms[i];
range_tree_t *trim_tree;
boolean_t issued_trim = B_FALSE;
boolean_t wait_aborted = B_FALSE;
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&msp->ms_lock);
/*
* Skip the metaslab when it has never been allocated
* or when there are no recent frees to trim.
*/
if (msp->ms_sm == NULL ||
range_tree_is_empty(msp->ms_trim)) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Skip the metaslab when it has already been disabled.
* This may happen when a manual TRIM or initialize
* operation is running concurrently. In the case
* of a manual TRIM, the ms_trim tree will have been
* vacated. Only ranges added after the manual TRIM
* disabled the metaslab will be included in the tree.
* These will be processed when the automatic TRIM
* next revisits this metaslab.
*/
if (msp->ms_disabled > 1) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Allocate an empty range tree which is swapped in
* for the existing ms_trim tree while it is processed.
*/
trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
range_tree_swap(&msp->ms_trim, &trim_tree);
ASSERT(range_tree_is_empty(msp->ms_trim));
/*
* There are two cases when constructing the per-vdev
* trim trees for a metaslab. If the top-level vdev
* has no children, then it is also a leaf and should
* be trimmed. Otherwise our children are the leaves
* and a trim tree should be constructed for each.
*/
trim_args_t *tap;
uint64_t children = vd->vdev_children;
if (children == 0) {
children = 1;
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
tap[0].trim_vdev = vd;
} else {
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
for (uint64_t c = 0; c < children; c++) {
tap[c].trim_vdev = vd->vdev_child[c];
}
}
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
vdev_t *cvd = ta->trim_vdev;
ta->trim_msp = msp;
ta->trim_extent_bytes_max = extent_bytes_max;
ta->trim_extent_bytes_min = extent_bytes_min;
ta->trim_type = TRIM_TYPE_AUTO;
ta->trim_flags = 0;
if (cvd->vdev_detached ||
!vdev_writeable(cvd) ||
!cvd->vdev_has_trim ||
cvd->vdev_trim_thread != NULL) {
continue;
}
/*
* When a device has an attached hot spare, or
* is being replaced, it will not be trimmed.
* This is done to avoid adding additional
* stress to a potentially unhealthy device,
* and to minimize the required rebuild time.
*/
if (!cvd->vdev_ops->vdev_op_leaf)
continue;
ta->trim_tree = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
range_tree_walk(trim_tree,
vdev_trim_range_add, ta);
}
mutex_exit(&msp->ms_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* Issue the TRIM I/Os for all ranges covered by the
* TRIM trees. These ranges are safe to TRIM because
* no new allocations will be performed until the call
* to metaslab_enable() below.
*/
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
/*
* Always yield to a manual TRIM if one has
* been started for the child vdev.
*/
if (ta->trim_tree == NULL ||
ta->trim_vdev->vdev_trim_thread != NULL) {
continue;
}
/*
* After this point metaslab_enable() must be
* called with the sync flag set. This is done
* here because vdev_trim_ranges() is allowed
* to be interrupted (EINTR) before issuing all
* of the required TRIM I/Os.
*/
issued_trim = B_TRUE;
int error = vdev_trim_ranges(ta);
if (error)
break;
}
/*
* Verify every range which was trimmed is still
* contained within the ms_allocatable tree.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
VERIFY3P(tap[0].trim_msp, ==, msp);
range_tree_walk(trim_tree,
vdev_trim_range_verify, &tap[0]);
mutex_exit(&msp->ms_lock);
}
range_tree_vacate(trim_tree, NULL, NULL);
range_tree_destroy(trim_tree);
/*
* Wait for a couple of kicks to ensure the TRIM I/O is
* synced. If the wait is aborted due to
* vdev_autotrim_exit_wanted, we need to signal
* metaslab_enable() to wait for sync.
*/
if (issued_trim) {
wait_aborted = vdev_autotrim_wait_kick(vd,
TXG_CONCURRENT_STATES + TXG_DEFER_SIZE);
}
metaslab_enable(msp, wait_aborted, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
if (ta->trim_tree == NULL)
continue;
range_tree_vacate(ta->trim_tree, NULL, NULL);
range_tree_destroy(ta->trim_tree);
}
kmem_free(tap, sizeof (trim_args_t) * children);
if (vdev_autotrim_should_stop(vd))
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
vdev_autotrim_wait_kick(vd, 1);
shift++;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_trim_io_lock);
while (cvd->vdev_trim_inflight[1] > 0) {
cv_wait(&cvd->vdev_trim_io_cv,
&cvd->vdev_trim_io_lock);
}
mutex_exit(&cvd->vdev_trim_io_lock);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* When exiting because the autotrim property was set to off,
* abandon any unprocessed ms_trim ranges to reclaim the memory.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_ms[i];
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
}
}
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT(vd->vdev_autotrim_thread != NULL);
vd->vdev_autotrim_thread = NULL;
cv_broadcast(&vd->vdev_autotrim_cv);
mutex_exit(&vd->vdev_autotrim_lock);
thread_exit();
}
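/*
 * A minimal sketch (hypothetical helper, not part of the original file)
 * of the group striding described at the top of vdev_autotrim_thread():
 * on pass 'shift' the thread visits metaslab indices
 * (shift % txgs_per_trim), that value plus txgs_per_trim, and so on.
 * For example, with txgs_per_trim = 32, pass 1 visits 1, 33, 65, ...
 */
static inline uint64_t
example_autotrim_ms_index(uint64_t shift, uint64_t txgs_per_trim, uint64_t n)
{
	/* The n-th metaslab index visited on pass 'shift'. */
	return ((shift % txgs_per_trim) + (n * txgs_per_trim));
}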
/*
* Starts an autotrim thread, if needed, for each top-level vdev which can be
* trimmed. A top-level vdev which has been evacuated will never be trimmed.
*/
void
vdev_autotrim(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
vdev_t *tvd = root_vd->vdev_child[i];
mutex_enter(&tvd->vdev_autotrim_lock);
if (vdev_writeable(tvd) && !tvd->vdev_removing &&
tvd->vdev_autotrim_thread == NULL) {
ASSERT3P(tvd->vdev_top, ==, tvd);
tvd->vdev_autotrim_thread = thread_create(NULL, 0,
vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
maxclsyspri);
ASSERT(tvd->vdev_autotrim_thread != NULL);
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
}
/*
* Wait for the vdev_autotrim_thread associated with the passed top-level
* vdev to be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_wait(vdev_t *tvd)
{
mutex_enter(&tvd->vdev_autotrim_lock);
if (tvd->vdev_autotrim_thread != NULL) {
tvd->vdev_autotrim_exit_wanted = B_TRUE;
cv_broadcast(&tvd->vdev_autotrim_kick_cv);
cv_wait(&tvd->vdev_autotrim_cv,
&tvd->vdev_autotrim_lock);
ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
tvd->vdev_autotrim_exit_wanted = B_FALSE;
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
void
vdev_autotrim_kick(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
vdev_t *root_vd = spa->spa_root_vdev;
vdev_t *tvd;
for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
tvd = root_vd->vdev_child[i];
mutex_enter(&tvd->vdev_autotrim_lock);
if (tvd->vdev_autotrim_thread != NULL)
cv_broadcast(&tvd->vdev_autotrim_kick_cv);
mutex_exit(&tvd->vdev_autotrim_lock);
}
}
/*
* Wait for all of the vdev_autotrim threads associated with the pool to
* be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_all(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++)
vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
}
/*
* Conditionally restart all of the vdev_autotrim threads for the pool.
*/
void
vdev_autotrim_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa->spa_autotrim)
vdev_autotrim(spa);
}
static __attribute__((noreturn)) void
vdev_trim_l2arc_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
l2arc_dev_t *dev = l2arc_vdev_get(vd);
trim_args_t ta = {0};
range_seg64_t physical_rs;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
physical_rs.rs_start = vd->vdev_trim_bytes_done = 0;
physical_rs.rs_end = vd->vdev_trim_bytes_est =
vdev_get_min_asize(vd);
range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
mutex_exit(&vd->vdev_trim_lock);
(void) vdev_trim_ranges(&ta);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
ASSERT(vd->vdev_trim_thread != NULL ||
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and
* must check to see if it needs to restart a trim. That thread
* will be holding the spa_config_lock which would prevent the
* txg_wait_synced from completing. Same strategy as in
* vdev_trim_thread().
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
mutex_enter(&vd->vdev_trim_lock);
/*
* Update the header of the cache device here, before
* broadcasting vdev_trim_cv, which may lead to the removal
* of the device. The same applies to setting l2ad_trim_all to
* false.
*/
spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd,
RW_READER);
memset(dev->l2ad_dev_hdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd);
vd->vdev_trim_thread = NULL;
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE)
dev->l2ad_trim_all = B_FALSE;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Starts TRIM threads for the L2ARC devices in a spa and assigns them
* to the vd->vdev_trim_thread variable. This facilitates trimming the
* whole cache device using TRIM_TYPE_MANUAL upon addition to a pool,
* at pool creation, or when the header of the device is invalid.
*/
void
vdev_trim_l2arc(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off TRIM threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_t *vd = spa->spa_l2cache.sav_vdevs[i];
l2arc_dev_t *dev = l2arc_vdev_get(vd);
if (dev == NULL || !dev->l2ad_trim_all) {
/*
* Don't attempt TRIM if the vdev is UNAVAIL or if the
* cache device was not marked for whole device TRIM
* (i.e. l2arc_trim_ahead = 0, or the L2ARC device header
* is valid with trim_state = VDEV_TRIM_COMPLETE and
* l2ad_log_entries > 0).
*/
continue;
}
mutex_enter(&vd->vdev_trim_lock);
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_l2arc_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* A wrapper which calls vdev_trim_ranges(). It is intended to be called
* on leaf vdevs.
*/
int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
trim_args_t ta = {0};
range_seg64_t physical_rs;
int error;
physical_rs.rs_start = start;
physical_rs.rs_end = start + size;
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_top->vdev_removing);
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_SIMPLE;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);
if (physical_rs.rs_end > physical_rs.rs_start) {
range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
} else {
ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
}
error = vdev_trim_ranges(&ta);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree);
return (error);
}
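/*
 * Illustrative call (hypothetical values): trimming the first 1 MiB of a
 * healthy leaf vdev would look like
 *
 *	error = vdev_trim_simple(vd, 0, 1ULL << 20);
 *
 * where the offset is relative to the start of the trimmable region;
 * vdev_trim_range() adds VDEV_LABEL_START_SIZE before issuing the I/O.
 */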
EXPORT_SYMBOL(vdev_trim);
EXPORT_SYMBOL(vdev_trim_stop);
EXPORT_SYMBOL(vdev_trim_stop_all);
EXPORT_SYMBOL(vdev_trim_stop_wait);
EXPORT_SYMBOL(vdev_trim_restart);
EXPORT_SYMBOL(vdev_autotrim);
EXPORT_SYMBOL(vdev_autotrim_stop_all);
EXPORT_SYMBOL(vdev_autotrim_stop_wait);
EXPORT_SYMBOL(vdev_autotrim_restart);
EXPORT_SYMBOL(vdev_trim_l2arc);
EXPORT_SYMBOL(vdev_trim_simple);
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_max, UINT, ZMOD_RW,
"Max size of TRIM commands, larger will be split");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_min, UINT, ZMOD_RW,
"Min size of TRIM commands, smaller will be skipped");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, metaslab_skip, UINT, ZMOD_RW,
"Skip metaslabs which have never been initialized");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, txg_batch, UINT, ZMOD_RW,
"Min number of txgs to aggregate frees before issuing TRIM");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, queue_limit, UINT, ZMOD_RW,
"Max queued TRIMs outstanding per leaf vdev");
diff --git a/sys/contrib/openzfs/module/zfs/zfs_replay.c b/sys/contrib/openzfs/module/zfs/zfs_replay.c
index 09c7be853bf9..2e0af60f6db4 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_replay.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_replay.c
@@ -1,1223 +1,1259 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/vfs.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/spa.h>
#include <sys/zil.h>
#include <sys/byteorder.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/dmu_objset.h>
#include <sys/zfeature.h>
/*
* NB: FreeBSD expects to be able to do vnode locking in lookup and
* hold the locks across all subsequent VOPs until vput is called.
* This means that its zfs vnops routines can't do any internal locking.
* In order to have the same contract as the Linux vnops there would
* need to be duplicate locked vnops. If the vnops were used more widely
* in common code this would likely be preferable. However, currently
* this is the only file where this is the case.
*/
/*
* Functions to replay ZFS intent log (ZIL) records.
* The functions are called through a function vector (zfs_replay_vector)
* which is indexed by the transaction type.
*/
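/*
 * Illustrative dispatch (a simplified sketch of the standard ZIL replay
 * flow): for a log record 'lr', replay amounts to
 *
 *	error = zfs_replay_vector[lr->lrc_txtype](arg1, lr, byteswap);
 *
 * where lrc_txtype selects the handler (e.g. TX_CREATE, TX_REMOVE) and
 * byteswap indicates the record was logged with the opposite endianness.
 */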
static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
memset(vap, 0, sizeof (*vap));
vap->va_mask = (uint_t)mask;
vap->va_mode = mode;
#if defined(__FreeBSD__) || defined(__APPLE__)
vap->va_type = IFTOVT(mode);
#endif
vap->va_uid = (uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid;
vap->va_gid = (gid_t)(IS_EPHEMERAL(gid)) ? -1 : gid;
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_nodeid = nodeid;
}
static int
zfs_replay_error(void *arg1, void *arg2, boolean_t byteswap)
{
(void) arg1, (void) arg2, (void) byteswap;
return (SET_ERROR(ENOTSUP));
}
static void
zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
xoptattr_t *xoap = NULL;
uint64_t *attrs;
uint64_t *crtime;
uint32_t *bitmap;
void *scanstamp;
int i;
xvap->xva_vattr.va_mask |= ATTR_XVATTR;
if ((xoap = xva_getxoptattr(xvap)) == NULL) {
xvap->xva_vattr.va_mask &= ~ATTR_XVATTR; /* shouldn't happen */
return;
}
ASSERT(lrattr->lr_attr_masksize == xvap->xva_mapsize);
bitmap = &lrattr->lr_attr_bitmap;
for (i = 0; i != lrattr->lr_attr_masksize; i++, bitmap++)
xvap->xva_reqattrmap[i] = *bitmap;
attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1);
crtime = attrs + 1;
scanstamp = (caddr_t)(crtime + 2);
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
xoap->xoa_hidden = ((*attrs & XAT0_HIDDEN) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
xoap->xoa_system = ((*attrs & XAT0_SYSTEM) != 0);
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
xoap->xoa_archive = ((*attrs & XAT0_ARCHIVE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_READONLY))
xoap->xoa_readonly = ((*attrs & XAT0_READONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
xoap->xoa_immutable = ((*attrs & XAT0_IMMUTABLE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
xoap->xoa_nounlink = ((*attrs & XAT0_NOUNLINK) != 0);
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
xoap->xoa_appendonly = ((*attrs & XAT0_APPENDONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
xoap->xoa_nodump = ((*attrs & XAT0_NODUMP) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
xoap->xoa_opaque = ((*attrs & XAT0_OPAQUE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
xoap->xoa_av_modified = ((*attrs & XAT0_AV_MODIFIED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
xoap->xoa_av_quarantined =
((*attrs & XAT0_AV_QUARANTINED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime);
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
memcpy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
memcpy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
xoap->xoa_offline = ((*attrs & XAT0_OFFLINE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
xoap->xoa_sparse = ((*attrs & XAT0_SPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
xoap->xoa_projinherit = ((*attrs & XAT0_PROJINHERIT) != 0);
}
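The pointer arithmetic above packs a variable-length bitmap, a 64-bit attrs word, two creation-time words, and a scanstamp into one record. A minimal userland sketch of the same walk, assuming the usual masksize of 3 (XVA_MAPSIZE) and a simplified stand-in struct rather than the real lr_attr_t:
#include <stdint.h>
#include <stdio.h>
typedef struct {
	uint32_t lr_attr_masksize;	/* number of bitmap words */
	uint32_t lr_attr_bitmap;	/* first word of the bitmap */
} fake_lr_attr_t;			/* 8 bytes, like lr_attr_t */
int
main(void)
{
	static uint64_t storage[8];	/* 8-byte-aligned backing store */
	((uint32_t *)storage)[0] = 3;	/* masksize */
	fake_lr_attr_t *lrattr = (fake_lr_attr_t *)storage;
	/* same arithmetic as zfs_replay_xvattr() above */
	uint64_t *attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1);
	uint64_t *crtime = attrs + 1;
	char *scanstamp = (char *)(crtime + 2);
	printf("attrs @ %td, crtime @ %td, scanstamp @ %td\n",
	    (char *)attrs - (char *)lrattr,
	    (char *)crtime - (char *)lrattr,
	    scanstamp - (char *)lrattr);	/* prints 16, 24, 40 */
	return (0);
}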
static int
zfs_replay_domain_cnt(uint64_t uid, uint64_t gid)
{
uint64_t uid_idx;
uint64_t gid_idx;
int domcnt = 0;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
if (uid_idx)
domcnt++;
if (gid_idx > 0 && gid_idx != uid_idx)
domcnt++;
return (domcnt);
}
static void *
zfs_replay_fuid_domain_common(zfs_fuid_info_t *fuid_infop, void *start,
int domcnt)
{
int i;
for (i = 0; i != domcnt; i++) {
fuid_infop->z_domain_table[i] = start;
start = (caddr_t)start + strlen(start) + 1;
}
return (start);
}
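The helper above walks domain strings stored back to back, each NUL-terminated. A tiny self-contained model of that walk (the domain names are invented):
#include <stdio.h>
#include <string.h>
int
main(void)
{
	/* two packed, NUL-terminated domain strings */
	const char packed[] = "EXAMPLE.ORG\0BUILTIN";
	const char *p = packed;
	for (int i = 0; i != 2; i++) {
		printf("z_domain_table[%d] = %s\n", i, p);
		p += strlen(p) + 1;	/* step past string and its NUL */
	}
	return (0);
}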
/*
* Set the uid/gid in the fuid_info structure.
*/
static void
zfs_replay_fuid_ugid(zfs_fuid_info_t *fuid_infop, uint64_t uid, uint64_t gid)
{
/*
* If owner or group are log specific FUIDs then slurp up
* domain information and build zfs_fuid_info_t
*/
if (IS_EPHEMERAL(uid))
fuid_infop->z_fuid_owner = uid;
if (IS_EPHEMERAL(gid))
fuid_infop->z_fuid_group = gid;
}
/*
* Load fuid domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuid_domain(void *buf, void **end, uint64_t uid, uint64_t gid)
{
int domcnt;
zfs_fuid_info_t *fuid_infop;
fuid_infop = zfs_fuid_info_alloc();
domcnt = zfs_replay_domain_cnt(uid, gid);
if (domcnt == 0)
return (fuid_infop);
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
fuid_infop->z_domain_cnt = domcnt;
*end = zfs_replay_fuid_domain_common(fuid_infop, buf, domcnt);
return (fuid_infop);
}
/*
* load zfs_fuid_t's and fuid_domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid,
uint64_t gid)
{
uint64_t *log_fuid = (uint64_t *)start;
zfs_fuid_info_t *fuid_infop;
int i;
fuid_infop = zfs_fuid_info_alloc();
fuid_infop->z_domain_cnt = domcnt;
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
for (i = 0; i != idcnt; i++) {
zfs_fuid_t *zfuid;
zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
zfuid->z_logfuid = *log_fuid;
zfuid->z_id = -1;
zfuid->z_domidx = 0;
list_insert_tail(&fuid_infop->z_fuids, zfuid);
log_fuid++;
}
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
*end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt);
return (fuid_infop);
}
static void
zfs_replay_swap_attrs(lr_attr_t *lrattr)
{
/* swap the lr_attr structure */
byteswap_uint32_array(lrattr, sizeof (*lrattr));
/* swap the bitmap */
byteswap_uint32_array(lrattr + 1, (lrattr->lr_attr_masksize - 1) *
sizeof (uint32_t));
/* swap the attributes, create time + 64 bit word for attributes */
byteswap_uint64_array((caddr_t)(lrattr + 1) + (sizeof (uint32_t) *
(lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t));
}
/*
* Replay file create with optional ACL, xvattr information as well
* as optional FUID information.
*/
static int
zfs_replay_create_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_create_t *lracl = arg2;
char *name = NULL; /* location determined later */
lr_create_t *lr = (lr_create_t *)lracl;
znode_t *dzp;
znode_t *zp;
xvattr_t xva;
int vflg = 0;
vsecattr_t vsec = { 0 };
lr_attr_t *lrattr;
void *aclstart;
void *fuidstart;
size_t xvatlen = 0;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lracl));
+
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lracl, sizeof (*lracl));
if (txtype == TX_CREATE_ACL_ATTR ||
txtype == TX_MKDIR_ACL_ATTR) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
zfs_replay_swap_attrs(lrattr);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
}
aclstart = (caddr_t)(lracl + 1) + xvatlen;
zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE);
/* swap fuids */
if (lracl->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes),
lracl->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode size. The generic
* zfs_create() has no concept of these attributes, so we smuggle
* the values inside the vattr's otherwise unused va_ctime,
* va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto bail;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch (txtype) {
case TX_CREATE_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
zfs_fallthrough;
case TX_CREATE_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
xva.xva_vattr.va_mask |= ATTR_XVATTR;
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
#if defined(__linux__)
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, &vsec, zfs_init_idmap);
#else
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, &vsec, NULL);
#endif
break;
case TX_MKDIR_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
zfs_fallthrough;
case TX_MKDIR_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
#if defined(__linux__)
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, &vsec, zfs_init_idmap);
#else
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, &vsec, NULL);
#endif
break;
default:
error = SET_ERROR(ENOTSUP);
}
bail:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_create_t *lr = arg2;
char *name = NULL; /* location determined later */
char *link; /* symlink content follows name */
znode_t *dzp;
znode_t *zp = NULL;
xvattr_t xva;
int vflg = 0;
size_t lrsize = sizeof (lr_create_t);
lr_attr_t *lrattr;
void *start;
size_t xvatlen;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr));
+
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode slot count. The
* generic zfs_create() has no concept of these attributes, so
* we smuggle the values inside the vattr's otherwise unused
* va_ctime, va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto out;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
/*
* Symlinks don't have fuid info, and CIFS never creates
* symlinks.
*
* The _ATTR versions will grab the fuid info in their subcases.
*/
if (txtype != TX_SYMLINK &&
txtype != TX_MKDIR_ATTR &&
txtype != TX_CREATE_ATTR) {
start = (lr + 1);
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
}
switch (txtype) {
case TX_CREATE_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
zfs_fallthrough;
case TX_CREATE:
if (name == NULL)
name = (char *)start;
#if defined(__linux__)
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, NULL, zfs_init_idmap);
#else
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, NULL, NULL);
#endif
break;
case TX_MKDIR_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
zfs_fallthrough;
case TX_MKDIR:
if (name == NULL)
name = (char *)(lr + 1);
#if defined(__linux__)
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, NULL, zfs_init_idmap);
#else
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, NULL, NULL);
#endif
break;
case TX_MKXATTR:
error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &zp, kcred);
break;
case TX_SYMLINK:
name = (char *)(lr + 1);
link = name + strlen(name) + 1;
#if defined(__linux__)
error = zfs_symlink(dzp, name, &xva.xva_vattr,
link, &zp, kcred, vflg, zfs_init_idmap);
#else
error = zfs_symlink(dzp, name, &xva.xva_vattr,
link, &zp, kcred, vflg, NULL);
#endif
break;
default:
error = SET_ERROR(ENOTSUP);
}
out:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_remove_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_remove_t */
znode_t *dzp;
int error;
int vflg = 0;
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch ((int)lr->lr_common.lrc_txtype) {
case TX_REMOVE:
error = zfs_remove(dzp, name, kcred, vflg);
break;
case TX_RMDIR:
error = zfs_rmdir(dzp, name, NULL, kcred, vflg);
break;
default:
error = SET_ERROR(ENOTSUP);
}
zrele(dzp);
return (error);
}
static int
zfs_replay_link(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_link_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_link_t */
znode_t *dzp, *zp;
int error;
int vflg = 0;
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
zrele(dzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
error = zfs_link(dzp, zp, name, kcred, vflg);
zrele(zp);
zrele(dzp);
return (error);
}
static int
do_zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, char *sname,
char *tname, uint64_t rflags, vattr_t *wo_vap)
{
znode_t *sdzp, *tdzp;
int error, vflg = 0;
/* Only Linux currently supports RENAME_* flags. */
#ifdef __linux__
VERIFY0(rflags & ~(RENAME_EXCHANGE | RENAME_WHITEOUT));
/* wo_vap must be non-NULL iff. we're doing RENAME_WHITEOUT */
VERIFY_EQUIV(rflags & RENAME_WHITEOUT, wo_vap != NULL);
#else
VERIFY0(rflags);
#endif
if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) {
zrele(sdzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
#if defined(__linux__)
error = zfs_rename(sdzp, sname, tdzp, tname, kcred, vflg, rflags,
wo_vap, zfs_init_idmap);
#else
error = zfs_rename(sdzp, sname, tdzp, tname, kcred, vflg, rflags,
wo_vap, NULL);
#endif
zrele(tdzp);
zrele(sdzp);
return (error);
}
static int
zfs_replay_rename(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_rename_t *lr = arg2;
- char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
- char *tname = sname + strlen(sname) + 1;
+
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr));
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
+ char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
+ char *tname = sname + strlen(sname) + 1;
return (do_zfs_replay_rename(zfsvfs, lr, sname, tname, 0, NULL));
}
static int
zfs_replay_rename_exchange(void *arg1, void *arg2, boolean_t byteswap)
{
#ifdef __linux__
zfsvfs_t *zfsvfs = arg1;
lr_rename_t *lr = arg2;
- char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
- char *tname = sname + strlen(sname) + 1;
+
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr));
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
+ char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
+ char *tname = sname + strlen(sname) + 1;
return (do_zfs_replay_rename(zfsvfs, lr, sname, tname, RENAME_EXCHANGE,
NULL));
#else
return (SET_ERROR(ENOTSUP));
#endif
}
static int
zfs_replay_rename_whiteout(void *arg1, void *arg2, boolean_t byteswap)
{
#ifdef __linux__
zfsvfs_t *zfsvfs = arg1;
lr_rename_whiteout_t *lr = arg2;
int error;
- /* sname and tname follow lr_rename_whiteout_t */
- char *sname = (char *)(lr + 1);
- char *tname = sname + strlen(sname) + 1;
/* For the whiteout file. */
xvattr_t xva;
uint64_t objid;
uint64_t dnodesize;
+ ASSERT3U(lr->lr_rename.lr_common.lrc_reclen, >, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
objid = LR_FOID_GET_OBJ(lr->lr_wfoid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_wfoid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_wmode, lr->lr_wuid, lr->lr_wgid, lr->lr_wrdev, objid);
/*
* As with TX_CREATE, RENAME_WHITEOUT ends up in zfs_mknode(), which
* assigns the object's creation time, generation number, and dnode
* slot count. The generic zfs_rename() has no concept of these
* attributes, so we smuggle the values inside the vattr's otherwise
* unused va_ctime, va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_wcrtime);
xva.xva_vattr.va_nblocks = lr->lr_wgen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
return (error);
+ /* sname and tname follow lr_rename_whiteout_t */
+ char *sname = (char *)(lr + 1);
+ char *tname = sname + strlen(sname) + 1;
return (do_zfs_replay_rename(zfsvfs, &lr->lr_rename, sname, tname,
RENAME_WHITEOUT, &xva.xva_vattr));
#else
return (SET_ERROR(ENOTSUP));
#endif
}
static int
zfs_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
znode_t *zp;
int error;
uint64_t eod, offset, length;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
/*
* As we can log writes out of order, it's possible the
* file has been removed. In this case just drop the write
* and return success.
*/
if (error == ENOENT)
error = 0;
return (error);
}
offset = lr->lr_offset;
length = lr->lr_length;
eod = offset + length; /* end of data for this write */
/*
* This may be a write from a dmu_sync() for a whole block,
* and may extend beyond the current end of the file.
* We can't just replay what was written for this TX_WRITE as
* a future TX_WRITE2 may extend the eof and the data for that
* write needs to be there. So we write the whole block and
* reduce the eof. This needs to be done within the single dmu
* transaction created within vn_rdwr -> zfs_write. So a possible
* new end of file is passed through in zfsvfs->z_replay_eof
*/
zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
if (zp->z_size < eod)
zfsvfs->z_replay_eof = eod;
}
error = zfs_write_simple(zp, data, length, offset, NULL);
zrele(zp);
zfsvfs->z_replay_eof = 0; /* safety */
return (error);
}
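To make the dmu_sync() whole-block branch concrete, here is the same rounding arithmetic with made-up values (a 128 KiB block and a 4 KiB logged write):
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
int
main(void)
{
	uint64_t blocksize = 131072;	/* BP_GET_LSIZE() result, assumed */
	uint64_t offset = 200704;	/* lr_offset, mid-block */
	uint64_t length = 4096;		/* lr_length, partial block */
	if (length < blocksize) {
		offset -= offset % blocksize;	/* round down to block start */
		length = blocksize;		/* replay the whole block */
	}
	/* prints: replay 131072 bytes at 131072 */
	printf("replay %" PRIu64 " bytes at %" PRIu64 "\n", length, offset);
	return (0);
}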
/*
* TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
* meaning the pool block is already being synced. So now that we always write
* out full blocks, all we have to do is expand the eof if
* the file has grown.
*/
static int
zfs_replay_write2(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
znode_t *zp;
int error;
uint64_t end;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
top:
end = lr->lr_offset + lr->lr_length;
if (end > zp->z_size) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
zp->z_size = end;
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zrele(zp);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
return (error);
}
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
/* Ensure the replayed seq is updated */
(void) zil_replaying(zfsvfs->z_log, tx);
dmu_tx_commit(tx);
}
zrele(zp);
return (error);
}
static int
zfs_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_truncate_t *lr = arg2;
znode_t *zp;
flock64_t fl = {0};
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_start = lr->lr_offset;
fl.l_len = lr->lr_length;
error = zfs_space(zp, F_FREESP, &fl, O_RDWR | O_LARGEFILE,
lr->lr_offset, kcred);
zrele(zp);
return (error);
}
static int
zfs_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_setattr_t *lr = arg2;
znode_t *zp;
xvattr_t xva;
vattr_t *vap = &xva.xva_vattr;
int error;
void *start;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
xva_init(&xva);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if ((lr->lr_mask & ATTR_XVATTR) &&
zfsvfs->z_version >= ZPL_VERSION_INITIAL)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);
vap->va_size = lr->lr_size;
ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);
gethrestime(&vap->va_ctime);
vap->va_mask |= ATTR_CTIME;
/*
* Fill in xvattr_t portions if necessary.
*/
start = (lr_setattr_t *)(lr + 1);
if (vap->va_mask & ATTR_XVATTR) {
zfs_replay_xvattr((lr_attr_t *)start, &xva);
start = (caddr_t)start +
ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
} else
xva.xva_vattr.va_mask &= ~ATTR_XVATTR;
zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
#if defined(__linux__)
error = zfs_setattr(zp, vap, 0, kcred, zfs_init_idmap);
#else
error = zfs_setattr(zp, vap, 0, kcred, NULL);
#endif
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
static int
zfs_replay_setsaxattr(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_setsaxattr_t *lr = arg2;
znode_t *zp;
nvlist_t *nvl;
size_t sa_size;
char *name;
char *value;
size_t size;
int error = 0;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lr->lr_common.lrc_reclen, >, sizeof (*lr) + lr->lr_size);
+
ASSERT(spa_feature_is_active(zfsvfs->z_os->os_spa,
SPA_FEATURE_ZILSAXATTR));
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
goto out;
ASSERT(zp->z_xattr_cached);
nvl = zp->z_xattr_cached;
/* Get xattr name, value and size from log record */
size = lr->lr_size;
name = (char *)(lr + 1);
if (size == 0) {
value = NULL;
error = nvlist_remove(nvl, name, DATA_TYPE_BYTE_ARRAY);
} else {
value = name + strlen(name) + 1;
/* Limited to 32k to keep nvpair memory allocations small */
if (size > DXATTR_MAX_ENTRY_SIZE) {
error = SET_ERROR(EFBIG);
goto out;
}
/* Prevent the DXATTR SA from consuming the entire SA region */
error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error)
goto out;
if (sa_size > DXATTR_MAX_SA_SIZE) {
error = SET_ERROR(EFBIG);
goto out;
}
error = nvlist_add_byte_array(nvl, name, (uchar_t *)value,
size);
}
/*
* Update the SA for additions, modifications, and removals. On
* error drop the inconsistent cached version of the nvlist, it
* will be reconstructed from the ARC when next accessed.
*/
if (error == 0)
error = zfs_sa_set_xattr(zp, name, value, size);
if (error) {
nvlist_free(nvl);
zp->z_xattr_cached = NULL;
}
out:
rw_exit(&zp->z_xattr_lock);
zrele(zp);
return (error);
}
static int
zfs_replay_acl_v0(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_v0_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */
vsecattr_t vsa = {0};
znode_t *zp;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr) +
+ sizeof (ace_t) * lr->lr_aclcnt);
+
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_oldace_byteswap(ace, lr->lr_aclcnt);
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
vsa.vsa_aclflags = 0;
vsa.vsa_aclentp = ace;
error = zfs_setsecattr(zp, &vsa, 0, kcred);
zrele(zp);
return (error);
}
/*
* Replaying ACLs is complicated by FUID support.
* The log record may contain some optional data
* to be used for replaying FUID's. These pieces
* are the actual FUIDs that were created initially.
* The FUID table index may no longer be valid and
* during zfs_create() a new index may be assigned.
* Because of this the log will contain the original
* domain+rid in order to create a new FUID.
*
* The individual ACEs may contain an ephemeral uid/gid which is no
* longer valid and will need to be replaced with an actual FUID.
*
*/
static int
zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1);
vsecattr_t vsa = {0};
znode_t *zp;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr) + lr->lr_acl_bytes);
+
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
if (lr->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes),
lr->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentp = ace;
vsa.vsa_aclentsz = lr->lr_acl_bytes;
vsa.vsa_aclflags = lr->lr_acl_flags;
if (lr->lr_fuidcnt) {
void *fuidstart = (caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart, &fuidstart,
lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
}
error = zfs_setsecattr(zp, &vsa, 0, kcred);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
static int
zfs_replay_clone_range(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_clone_range_t *lr = arg2;
znode_t *zp;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, offsetof(lr_clone_range_t,
+ lr_bps[lr->lr_nbps]));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
/*
* Clones can be logged out of order, so don't be surprised if
* the file is gone - just return success.
*/
if (error == ENOENT)
error = 0;
return (error);
}
error = zfs_clone_range_replay(zp, lr->lr_offset, lr->lr_length,
lr->lr_blksz, lr->lr_bps, lr->lr_nbps);
zrele(zp);
return (error);
}
/*
* Callback vectors for replaying records
*/
zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE] = {
zfs_replay_error, /* no such type */
zfs_replay_create, /* TX_CREATE */
zfs_replay_create, /* TX_MKDIR */
zfs_replay_create, /* TX_MKXATTR */
zfs_replay_create, /* TX_SYMLINK */
zfs_replay_remove, /* TX_REMOVE */
zfs_replay_remove, /* TX_RMDIR */
zfs_replay_link, /* TX_LINK */
zfs_replay_rename, /* TX_RENAME */
zfs_replay_write, /* TX_WRITE */
zfs_replay_truncate, /* TX_TRUNCATE */
zfs_replay_setattr, /* TX_SETATTR */
zfs_replay_acl_v0, /* TX_ACL_V0 */
zfs_replay_acl, /* TX_ACL */
zfs_replay_create_acl, /* TX_CREATE_ACL */
zfs_replay_create, /* TX_CREATE_ATTR */
zfs_replay_create_acl, /* TX_CREATE_ACL_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL */
zfs_replay_create, /* TX_MKDIR_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL_ATTR */
zfs_replay_write2, /* TX_WRITE2 */
zfs_replay_setsaxattr, /* TX_SETSAXATTR */
zfs_replay_rename_exchange, /* TX_RENAME_EXCHANGE */
zfs_replay_rename_whiteout, /* TX_RENAME_WHITEOUT */
zfs_replay_clone_range, /* TX_CLONE_RANGE */
};
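Replay dispatch goes through this table, indexed by the masked transaction type; the real caller is in zil.c. A self-contained model of the dispatch, with invented indices and stub handlers:
#include <stdio.h>
typedef int replay_fn_t(void *, void *, int);
static int
replay_error(void *a1, void *a2, int bs)
{
	(void) a1, (void) a2, (void) bs;
	return (-1);			/* ENOTSUP stand-in */
}
static int
replay_write(void *a1, void *a2, int bs)
{
	(void) a1, (void) a2, (void) bs;
	return (0);
}
/* slot 0 is the "no such type" handler, as in zfs_replay_vector */
static replay_fn_t *const vector[] = { replay_error, replay_write };
int
main(void)
{
	unsigned txtype = 1;		/* pretend TX_WRITE == 1 here */
	int error = vector[txtype < 2 ? txtype : 0](NULL, NULL, 0);
	printf("replay returned %d\n", error);
	return (0);
}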
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index 3a5fa75df2ea..2b37834d5c56 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -1,1505 +1,1558 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
+#include <sys/dsl_crypt.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfeature.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
+/*
+ * Enable the experimental block cloning feature. If this setting is 0, then
+ * even if feature@block_cloning is enabled, attempts to clone blocks will act
+ * as though the feature is disabled.
+ */
+int zfs_bclone_enabled = 0;
+
+/*
+ * When set, zfs_clone_range() waits for dirty data to be written to disk.
+ * This allows the clone operation to reliably succeed when a file is modified
+ * and then immediately cloned. For small files this may be slower than making
+ * a copy of the file and is therefore not the default. However, in certain
+ * scenarios this behavior may be desirable so a tunable is provided.
+ */
+static int zfs_bclone_wait_dirty = 0;
+
+/*
+ * Maximum bytes to read per chunk in zfs_read().
+ */
+static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024;
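These three tunables are presumably registered later in the file (not shown in this hunk); a sketch following the ZFS_MODULE_PARAM convention seen elsewhere in this diff, where the description strings are assumptions:
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
	"Bytes to read per chunk");
ZFS_MODULE_PARAM(zfs, zfs_, bclone_enabled, INT, ZMOD_RW,
	"Enable block cloning");
ZFS_MODULE_PARAM(zfs, zfs_, bclone_wait_dirty, INT, ZMOD_RW,
	"Wait for dirty blocks when cloning");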
static ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
int error = 0;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
(void) tsd_set(zfs_fsyncer_key, (void *)(uintptr_t)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
goto out;
atomic_inc_32(&zp->z_sync_writes_cnt);
zil_commit(zfsvfs->z_log, zp->z_id);
atomic_dec_32(&zp->z_sync_writes_cnt);
zfs_exit(zfsvfs, FTAG);
}
out:
tsd_set(zfs_fsyncer_key, NULL);
return (error);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
* Lseek support for finding holes (cmd == SEEK_HOLE) and
* data (cmd == SEEK_DATA). "off" is an in/out parameter.
*/
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfs_locked_range_t *lr;
uint64_t noff = (uint64_t)*off; /* new offset */
uint64_t file_sz;
int error;
boolean_t hole;
file_sz = zp->z_size;
if (noff >= file_sz) {
return (SET_ERROR(ENXIO));
}
if (cmd == F_SEEK_HOLE)
hole = B_TRUE;
else
hole = B_FALSE;
/* Flush any mmap()'d data to disk */
if (zn_has_cached_data(zp, 0, file_sz - 1))
zn_flush_cached_data(zp, B_FALSE);
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
zfs_rangelock_exit(lr);
if (error == ESRCH)
return (SET_ERROR(ENXIO));
/* File was dirty, so fall back to using generic logic */
if (error == EBUSY) {
if (hole)
*off = file_sz;
return (0);
}
/*
* We could find a hole that begins after the logical end-of-file,
* because dmu_offset_next() only works on whole blocks. If the
* EOF falls mid-block, then indicate that the "virtual hole"
* at the end of the file begins at the logical EOF, rather than
* at the end of the last block.
*/
if (noff > file_sz) {
ASSERT(hole);
noff = file_sz;
}
if (noff < *off)
return (error);
*off = noff;
return (error);
}
int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_holey_common(zp, cmd, off);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
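From userland, this machinery surfaces through lseek(2). A minimal probe, assuming a platform that defines SEEK_HOLE and an existing test file:
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int
main(int argc, char **argv)
{
#ifdef SEEK_HOLE
	int fd = open(argc > 1 ? argv[1] : "file.dat", O_RDONLY);
	if (fd == -1)
		return (1);
	off_t hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at/after 0 */
	off_t data = lseek(fd, 0, SEEK_DATA);	/* first data at/after 0 */
	printf("data @ %lld, hole @ %lld\n", (long long)data, (long long)hole);
	close(fd);
#endif
	return (0);
}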
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (flag & V_ACE_MASK)
#if defined(__linux__)
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
zfs_init_idmap);
#else
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
NULL);
#endif
else
#if defined(__linux__)
error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
#else
error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif
zfs_exit(zfsvfs, FTAG);
return (error);
}
-static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
-
/*
* Read bytes from specified file into supplied buffer.
*
* IN: zp - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - O_SYNC flags; used to provide FRSYNC semantics.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
*/
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
(void) cr;
int error = 0;
boolean_t frsync = B_FALSE;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
/* We don't copy out anything useful for directories. */
if (Z_ISDIR(ZTOTYPE(zp))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EISDIR));
}
/*
* Validate file offset
*/
if (zfs_uio_offset(uio) < (offset_t)0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (zfs_uio_resid(uio) == 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
#ifdef FRSYNC
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
* Only do this for non-snapshots.
*
* Some platforms do not support FRSYNC and instead map it
* to O_SYNC, which results in unnecessary calls to zil_commit. We
* only honor FRSYNC requests on platforms which support it.
*/
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
(frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (zfs_uio_offset(uio) >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
ssize_t start_offset = zfs_uio_offset(uio);
#endif
ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
ssize_t start_resid = n;
while (n > 0) {
ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
if (zfs_uio_segflg(uio) == UIO_NOCOPY)
error = mappedread_sf(zp, nbytes, uio);
else
#endif
if (zn_has_cached_data(zp, zfs_uio_offset(uio),
zfs_uio_offset(uio) + nbytes - 1) && !(ioflag & O_DIRECT)) {
error = mappedread(zp, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
#if defined(__linux__)
/*
* if we actually read some bytes, bubbling EFAULT
* up to become EAGAIN isn't what we want here...
*
* ...on Linux, at least. On FBSD, doing this breaks.
*/
if (error == EFAULT &&
(zfs_uio_offset(uio) - start_offset) != 0)
error = 0;
#endif
break;
}
n -= nbytes;
}
int64_t nread = start_resid - n;
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
task_io_account_read(nread);
out:
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
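Each pass of the read loop is clipped to the next chunk boundary; P2PHASE(x, n) is x % n for power-of-two n. A worked example with made-up numbers:
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	MIN(a, b)		((a) < (b) ? (a) : (b))
int
main(void)
{
	uint64_t chunk = 1024 * 1024;			/* 1 MiB default */
	uint64_t off = 3 * 1024 * 1024 + 512 * 1024;	/* offset 3.5 MiB */
	uint64_t n = 2 * 1024 * 1024;			/* 2 MiB remaining */
	uint64_t nbytes = MIN(n, chunk - P2PHASE(off, chunk));
	/* prints 524288: only up to the 4 MiB chunk boundary */
	printf("first pass reads %" PRIu64 " bytes\n", nbytes);
	return (0);
}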
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
ASSERT(clear_setid_bits_txgp != NULL);
ASSERT(tx != NULL);
/*
* Clear Set-UID/Set-GID bits on successful write if not
* privileged and at least one of the execute bits is set.
*
* It would be nice to do this after all writes have
* been done, but that would still expose the ISUID/ISGID
* to another app after the partial write is committed.
*
* Note: we don't call zfs_fuid_map_id() here because
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
secpolicy_vnode_setid_retain(zp, cr,
((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
newmode = zp->z_mode;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
(void *)&newmode, sizeof (uint64_t), tx);
mutex_exit(&zp->z_acl_lock);
/*
* Make sure SUID/SGID bits will be removed when we replay the
* log. If the setid bits keep coming back, don't log more
* than one TX_SETATTR per transaction group.
*/
if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
vattr_t va = {0};
va.va_mask = ATTR_MODE;
va.va_nodeid = zp->z_id;
va.va_mode = newmode;
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
ATTR_MODE, NULL);
*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
}
} else {
mutex_exit(&zp->z_acl_lock);
}
}
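The mode test above builds the any-execute mask by shifting S_IXUSR down to the group and other positions. A quick standalone check of that bit arithmetic:
#include <stdio.h>
#include <sys/stat.h>
int
main(void)
{
	unsigned any_exec = S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6);
	printf("any_exec = %04o\n", any_exec);			/* 0111 */
	printf("setid    = %04o\n", (unsigned)(S_ISUID | S_ISGID)); /* 6000 */
	return (0);
}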
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - O_APPEND flag set if in append mode.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated if byte count > 0
*/
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
int error = 0, error1;
ssize_t start_resid = zfs_uio_resid(uio);
uint64_t clear_setid_bits_txg = 0;
/*
* Fasttrack empty write
*/
ssize_t n = start_resid;
if (n == 0)
return (0);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
sa_bulk_attr_t bulk[4];
int count = 0;
uint64_t mtime[2], ctime[2];
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
(zfs_uio_offset(uio) < zp->z_size))) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* Validate file offset
*/
offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
if (woff < 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Pre-fault the pages to ensure slow (e.g. NFS) pages
* don't hold up txg.
*/
ssize_t pfbytes = MIN(n, DMU_MAX_ACCESS >> 1);
if (zfs_uio_prefaultpages(pfbytes, uio)) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFAULT));
}
/*
* If in append mode, set the io offset pointer to eof.
*/
zfs_locked_range_t *lr;
if (ioflag & O_APPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
woff = lr->lr_offset;
if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
zfs_uio_setoffset(uio, woff);
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (zn_rlimit_fsize_uio(zp, uio)) {
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
const rlim64_t limit = MAXOFFSET_T;
if (woff >= limit) {
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EFBIG));
}
if (n > limit - woff)
n = limit - woff;
uint64_t end_size = MAX(zp->z_size, woff + n);
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
const uint64_t projid = zp->z_projid;
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
woff = zfs_uio_offset(uio);
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
uint64_t blksz;
if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
if (zp->z_blksz > zfsvfs->z_max_blksz &&
!ISP2(zp->z_blksz)) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
blksz = 1 << highbit64(zp->z_blksz);
} else {
blksz = zfsvfs->z_max_blksz;
}
blksz = MIN(blksz, P2ROUNDUP(end_size,
SPA_MINBLOCKSIZE));
blksz = MAX(blksz, zp->z_blksz);
} else {
blksz = zp->z_blksz;
}
arc_buf_t *abuf = NULL;
ssize_t nbytes = n;
if (n >= blksz && woff >= zp->z_size &&
P2PHASE(woff, blksz) == 0 &&
(blksz >= SPA_OLD_MAXBLOCKSIZE || n < 4 * blksz)) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == blksz);
if ((error = zfs_uiocopy(abuf->b_data, blksz,
UIO_WRITE, uio, &nbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT3S(nbytes, ==, blksz);
} else {
nbytes = MIN(n, (DMU_MAX_ACCESS >> 1) -
P2PHASE(woff, blksz));
if (pfbytes < nbytes) {
if (zfs_uio_prefaultpages(nbytes, uio)) {
error = SET_ERROR(EFAULT);
break;
}
pfbytes = nbytes;
}
}
/*
* Start a transaction.
*/
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
/*
* NB: We must call zfs_clear_setid_bits_if_necessary before
* committing the transaction!
*/
/*
* If rangelock_enter() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since rangelock_reduce() will
* shrink down lr_length to the appropriate size.
*/
if (lr->lr_length == UINT64_MAX) {
zfs_grow_blocksize(zp, blksz, tx);
zfs_rangelock_reduce(lr, woff, n);
}
ssize_t tx_bytes;
if (abuf == NULL) {
tx_bytes = zfs_uio_resid(uio);
zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
if (error == EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
/*
* Account for partial writes before
* continuing the loop.
* Update needs to occur before the next
* zfs_uio_prefaultpages, or prefaultpages may
* error, and we may break the loop early.
*/
n -= tx_bytes - zfs_uio_resid(uio);
pfbytes -= tx_bytes - zfs_uio_resid(uio);
continue;
}
#endif
/*
* On FreeBSD, EFAULT should be propagated back to the
* VFS, which will handle faulting and will retry.
*/
if (error != 0 && error != EFAULT) {
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
break;
}
tx_bytes -= zfs_uio_resid(uio);
} else {
/*
* Thus, we're writing a full block at a block-aligned
* offset and extending the file past EOF.
*
* dmu_assign_arcbuf_by_dbuf() will directly assign the
* arc buffer to a dbuf.
*/
error = dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
if (error != 0) {
/*
* XXX This might not be necessary if
* dmu_assign_arcbuf_by_dbuf is guaranteed
* to be atomic.
*/
zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
cr, &clear_setid_bits_txg, tx);
dmu_return_arcbuf(abuf);
dmu_tx_commit(tx);
break;
}
ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
zfs_uioskip(uio, nbytes);
tx_bytes = nbytes;
}
if (tx_bytes &&
zn_has_cached_data(zp, woff, woff + tx_bytes - 1) &&
!(ioflag & O_DIRECT)) {
update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
&clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
(void) atomic_cas_64(&zp->z_size, end_size,
zfs_uio_offset(uio));
ASSERT(error == 0 || error == EFAULT);
}
/*
* If we are replaying and eof is non-zero then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
if (error1 != 0)
/* Avoid clobbering EFAULT. */
error = error1;
/*
* NB: During replay, the TX_SETATTR record logged by
* zfs_clear_setid_bits_if_necessary must precede any of
* the TX_WRITE records logged here.
*/
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
NULL, NULL);
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT3S(tx_bytes, ==, nbytes);
n -= nbytes;
pfbytes -= nbytes;
}
zfs_znode_update_vfs(zp);
zfs_rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, or the
* uio data is inaccessible, return an error. Otherwise, it's
* at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
error == EFAULT) {
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (ioflag & (O_SYNC | O_DSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
const int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
task_io_account_write(nwritten);
zfs_exit(zfsvfs, FTAG);
return (0);
}
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
zfs_exit(zfsvfs, FTAG);
return (error);
}
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
+ zilog_t *zilog;
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
-
+ zilog = zfsvfs->z_log;
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
}
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif
static void zfs_get_done(zgd_t *zgd, int error);
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zfsvfs_t *zfsvfs = arg;
objset_t *os = zfsvfs->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error = 0;
uint64_t zp_gen;
ASSERT3P(lwb, !=, NULL);
ASSERT3U(size, !=, 0);
/*
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
/* check if generation number matches */
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (zp_gen)) != 0) {
zfs_zrele_async(zp);
return (SET_ERROR(EIO));
}
if (zp_gen != gen) {
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
}
ASSERT(error == 0 || error == ENOENT);
} else { /* indirect write */
ASSERT3P(zio, !=, NULL);
/*
* Have to lock the whole block to ensure when it's
* written out and its checksum is being calculated
* that no one can change the data. We need to re-check
* blocksize after we get the lock in case it's changed!
*/
for (;;) {
uint64_t blkoff;
size = zp->z_blksz;
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
zfs_rangelock_exit(zgd->zgd_lr);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
if (zil_fault_io) {
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
if (error == 0)
error = dmu_buf_hold_noread(os, object, offset, zgd,
&db);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zfs_get_done, zgd);
ASSERT(error || lr->lr_length <= size);
/*
* On success, we need to wait for the write I/O
* initiated by dmu_sync() to complete before we can
* release this dbuf. We will finish everything up
* in the zfs_get_done() callback.
*/
if (error == 0)
return (0);
if (error == EALREADY) {
lr->lr_common.lrc_txtype = TX_WRITE2;
/*
* TX_WRITE2 relies on the data previously
* written by the TX_WRITE that caused
* EALREADY. We zero out the BP because
* it is the old, currently-on-disk BP.
*/
zgd->zgd_bp = NULL;
BP_ZERO(bp);
error = 0;
}
}
}
zfs_get_done(zgd, error);
return (error);
}
static void
zfs_get_done(zgd_t *zgd, int error)
{
(void) error;
znode_t *zp = zgd->zgd_private;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
kmem_free(zgd, sizeof (zgd_t));
}
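The immediate-versus-indirect split described in zfs_get_data() can be pictured with a toy cost model; the cutoff below is invented, the real policy lives in zfs_log_write() and the ZIL:
#include <stdint.h>
#include <stdio.h>
int
main(void)
{
	uint64_t immediate_max = 32 * 1024;	/* hypothetical cutoff */
	uint64_t sizes[] = { 4096, 131072 };
	for (int i = 0; i < 2; i++) {
		/*
		 * Small: copy the data into the log record itself.
		 * Large: sync the data and store a block pointer to it.
		 */
		printf("%llu bytes -> %s write\n",
		    (unsigned long long)sizes[i],
		    sizes[i] <= immediate_max ? "immediate" : "indirect");
	}
	return (0);
}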
static int
zfs_enter_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
int error;
/* Swap. Not sure if the order of zfs_enter()s is important. */
if (zfsvfs1 > zfsvfs2) {
zfsvfs_t *tmpzfsvfs;
tmpzfsvfs = zfsvfs2;
zfsvfs2 = zfsvfs1;
zfsvfs1 = tmpzfsvfs;
}
error = zfs_enter(zfsvfs1, tag);
if (error != 0)
return (error);
if (zfsvfs1 != zfsvfs2) {
error = zfs_enter(zfsvfs2, tag);
if (error != 0) {
zfs_exit(zfsvfs1, tag);
return (error);
}
}
return (0);
}
static void
zfs_exit_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
zfs_exit(zfsvfs1, tag);
if (zfsvfs1 != zfsvfs2)
zfs_exit(zfsvfs2, tag);
}
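zfs_enter_two() uses the classic address-ordered locking idiom to avoid deadlock; a generic pthread sketch of the same pattern (illustrative only, not the kernel code):
#include <pthread.h>
#include <stdio.h>
/* Lock two mutexes in a globally consistent (address) order. */
static void
lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {			/* swap so we always lock low first */
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	if (a != b)			/* same object: lock only once */
		pthread_mutex_lock(b);
}
int
main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;
	lock_two(&m1, &m2);
	puts("locked both");
	return (0);
}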
/*
* We split each clone request in chunks that can fit into a single ZIL
* log entry. Each ZIL log entry can fit 130816 bytes for a block cloning
* operation (see zil_max_log_data() and zfs_log_clone_range()). This gives
* us room for storing 1022 block pointers (130816 / 128 bytes per blkptr_t).
*
* On success, the function returns the number of bytes copied in *lenp.
* Note, it doesn't return how many bytes are left to be copied.
* On errors which are caused by any file system or BRT limitation,
* `EINVAL` is returned. In most cases a user requested bad parameters;
* it could be possible to clone the file, but some parameters don't match
* the requirements.
*/
int
zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
uint64_t *outoffp, uint64_t *lenp, cred_t *cr)
{
zfsvfs_t *inzfsvfs, *outzfsvfs;
objset_t *inos, *outos;
zfs_locked_range_t *inlr, *outlr;
dmu_buf_impl_t *db;
dmu_tx_t *tx;
zilog_t *zilog;
uint64_t inoff, outoff, len, done;
uint64_t outsize, size;
int error;
int count = 0;
sa_bulk_attr_t bulk[3];
uint64_t mtime[2], ctime[2];
uint64_t uid, gid, projid;
blkptr_t *bps;
size_t maxblocks, nbps;
uint_t inblksz;
uint64_t clear_setid_bits_txg = 0;
+ uint64_t last_synced_txg = 0;
inoff = *inoffp;
outoff = *outoffp;
len = *lenp;
done = 0;
inzfsvfs = ZTOZSB(inzp);
outzfsvfs = ZTOZSB(outzp);
/*
* We need to call zfs_enter() potentially on two different datasets,
* so we need a dedicated function for that.
*/
error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG);
if (error != 0)
return (error);
inos = inzfsvfs->z_os;
outos = outzfsvfs->z_os;
/*
* Both source and destination have to belong to the same storage pool.
*/
if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
/*
* outos and inos belong to the same storage pool,
* as verified by the single check a few lines above.
*/
if (!spa_feature_is_enabled(dmu_objset_spa(outos),
SPA_FEATURE_BLOCK_CLONING)) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
ASSERT(!outzfsvfs->z_replay);
/*
* Block cloning from an unencrypted dataset into an encrypted
* dataset and vice versa is not supported.
*/
if (inos->os_encrypted != outos->os_encrypted) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EXDEV));
}
+ /*
+ * Cloning across encrypted datasets is possible only if they
+ * share the same master key.
+ */
+ if (inos != outos && inos->os_encrypted &&
+ !dmu_objset_crypto_key_equal(inos, outos)) {
+ zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
+ return (SET_ERROR(EXDEV));
+ }
+
error = zfs_verify_zp(inzp);
if (error == 0)
error = zfs_verify_zp(outzp);
if (error != 0) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (error);
}
/*
 * We don't copy the source file's flags, which is why we don't allow
 * cloning files that are in quarantine.
*/
if (inzp->z_pflags & ZFS_AV_QUARANTINED) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EACCES));
}
if (inoff >= inzp->z_size) {
*lenp = 0;
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (0);
}
if (len > inzp->z_size - inoff) {
len = inzp->z_size - inoff;
}
if (len == 0) {
*lenp = 0;
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (0);
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(outzfsvfs)) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EROFS));
}
/*
 * If immutable or not appending, then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((outzp->z_pflags & ZFS_IMMUTABLE) != 0) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EPERM));
}
/*
* No overlapping if we are cloning within the same file.
*/
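	/*
	 * The condition below is the usual half-open interval overlap
	 * test: [inoff, inoff + len) and [outoff, outoff + len)
	 * intersect iff inoff < outoff + len and outoff < inoff + len.
	 * For example (illustrative), with inzp == outzp, inoff = 0,
	 * outoff = 64K and len = 128K, the ranges [0, 128K) and
	 * [64K, 192K) overlap, so EINVAL is returned.
	 */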
if (inzp == outzp) {
if (inoff < outoff + len && outoff < inoff + len) {
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Maintain predictable lock order.
*/
if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
RL_READER);
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
RL_WRITER);
} else {
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
RL_WRITER);
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
RL_READER);
}
inblksz = inzp->z_blksz;
/*
- * We cannot clone into files with different block size if we can't
- * grow it (block size is already bigger or more than one block).
+ * We cannot clone into a file with a different block size if we can't
+ * grow it (the block size is already bigger, the file has more than
+ * one block, or it is not locked for growth). There are other possible
+ * reasons for the grow to fail, but we cover what we can before
+ * opening the transaction and detect the rest after we try to do it.
*/
+ if (inblksz < outzp->z_blksz) {
+ error = SET_ERROR(EINVAL);
+ goto unlock;
+ }
if (inblksz != outzp->z_blksz && (outzp->z_size > outzp->z_blksz ||
- outzp->z_size > inblksz)) {
+ outlr->lr_length != UINT64_MAX)) {
error = SET_ERROR(EINVAL);
goto unlock;
}
/*
 * The block size must be a power of 2 if the destination offset != 0:
 * multiple blocks of non-power-of-2 size cannot exist.
*/
if (outoff != 0 && !ISP2(inblksz)) {
error = SET_ERROR(EINVAL);
goto unlock;
}
/*
 * Offsets and len must be at block boundaries.
*/
if ((inoff % inblksz) != 0 || (outoff % inblksz) != 0) {
error = SET_ERROR(EINVAL);
goto unlock;
}
/*
 * Length must be a multiple of blksz, except at the end of the file.
*/
if ((len % inblksz) != 0 &&
(len < inzp->z_size - inoff || len < outzp->z_size - outoff)) {
error = SET_ERROR(EINVAL);
goto unlock;
}
/*
 * If we are copying only one block and it is smaller than the
 * recordsize property, do not allow the destination to grow beyond
 * one block if it is not there yet. Otherwise the destination will
 * get stuck with that block size forever, which can be as small as
 * 512 bytes, no matter how big the destination grows later.
*/
if (len <= inblksz && inblksz < outzfsvfs->z_max_blksz &&
outzp->z_size <= inblksz && outoff + len > inblksz) {
error = SET_ERROR(EINVAL);
goto unlock;
}
error = zn_rlimit_fsize(outoff + len);
if (error != 0) {
goto unlock;
}
if (inoff >= MAXOFFSET_T || outoff >= MAXOFFSET_T) {
error = SET_ERROR(EFBIG);
goto unlock;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(outzfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(outzfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(outzfsvfs), NULL,
&outzp->z_size, 8);
zilog = outzfsvfs->z_log;
maxblocks = zil_max_log_data(zilog, sizeof (lr_clone_range_t)) /
sizeof (bps[0]);
uid = KUID_TO_SUID(ZTOUID(outzp));
gid = KGID_TO_SGID(ZTOGID(outzp));
projid = outzp->z_projid;
bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
/*
* Clone the file in reasonable size chunks. Each chunk is cloned
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (len > 0) {
size = MIN(inblksz * maxblocks, len);
if (zfs_id_overblockquota(outzfsvfs, DMU_USERUSED_OBJECT,
uid) ||
zfs_id_overblockquota(outzfsvfs, DMU_GROUPUSED_OBJECT,
gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(outzfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
nbps = maxblocks;
+ last_synced_txg = spa_last_synced_txg(dmu_objset_spa(inos));
error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
&nbps);
if (error != 0) {
/*
* If we are trying to clone a block that was created
- * in the current transaction group, error will be
- * EAGAIN here, which we can just return to the caller
- * so it can fallback if it likes.
+ * in the current transaction group, the error will be
+ * EAGAIN here. Based on zfs_bclone_wait_dirty we either
+ * return a shortened range to the caller so it can fall
+ * back, or wait for the next TXG and check again.
*/
- break;
- }
- /*
- * Encrypted data is fine as long as it comes from the same
- * dataset.
- * TODO: We want to extend it in the future to allow cloning to
- * datasets with the same keys, like clones or to be able to
- * clone a file from a snapshot of an encrypted dataset into the
- * dataset itself.
- */
- if (BP_IS_PROTECTED(&bps[0])) {
- if (inzfsvfs != outzfsvfs) {
- error = SET_ERROR(EXDEV);
- break;
+ if (error == EAGAIN && zfs_bclone_wait_dirty) {
+ txg_wait_synced(dmu_objset_pool(inos),
+ last_synced_txg + 1);
+ continue;
}
+
+ break;
}
/*
* Start a transaction.
*/
tx = dmu_tx_create(outos);
dmu_tx_hold_sa(tx, outzp->z_sa_hdl, B_FALSE);
db = (dmu_buf_impl_t *)sa_get_db(outzp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, outzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
break;
}
/*
- * Copy source znode's block size. This only happens on the
- * first iteration since zfs_rangelock_reduce() will shrink down
- * lr_len to the appropriate size.
+ * Copy the source znode's block size. This is done only if the
+ * whole znode is locked (see zfs_rangelock_cb()) and only
+ * on the first iteration since zfs_rangelock_reduce() will
+ * shrink down lr_length to the appropriate size.
*/
if (outlr->lr_length == UINT64_MAX) {
zfs_grow_blocksize(outzp, inblksz, tx);
+
+ /*
+ * Block growth may fail for many reasons we cannot
+ * predict here. If it happens, the cloning is doomed.
+ */
+ if (inblksz != outzp->z_blksz) {
+ error = SET_ERROR(EINVAL);
+ dmu_tx_abort(tx);
+ break;
+ }
+
/*
* Round range lock up to the block boundary, so we
* prevent appends until we are done.
*/
zfs_rangelock_reduce(outlr, outoff,
((len - 1) / inblksz + 1) * inblksz);
}
error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
bps, nbps);
if (error != 0) {
dmu_tx_commit(tx);
break;
}
+ if (zn_has_cached_data(outzp, outoff, outoff + size - 1)) {
+ update_pages(outzp, outoff, size, outos);
+ }
+
zfs_clear_setid_bits_if_necessary(outzfsvfs, outzp, cr,
&clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(outzp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((outsize = outzp->z_size) < outoff + size) {
(void) atomic_cas_64(&outzp->z_size, outsize,
outoff + size);
}
error = sa_bulk_update(outzp->z_sa_hdl, bulk, count, tx);
zfs_log_clone_range(zilog, tx, TX_CLONE_RANGE, outzp, outoff,
size, inblksz, bps, nbps);
dmu_tx_commit(tx);
if (error != 0)
break;
inoff += size;
outoff += size;
len -= size;
done += size;
}
vmem_free(bps, sizeof (bps[0]) * maxblocks);
zfs_znode_update_vfs(outzp);
unlock:
zfs_rangelock_exit(outlr);
zfs_rangelock_exit(inlr);
if (done > 0) {
/*
* If we have made at least partial progress, reset the error.
*/
error = 0;
ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);
if (outos->os_sync == ZFS_SYNC_ALWAYS) {
zil_commit(zilog, outzp->z_id);
}
*inoffp += done;
*outoffp += done;
*lenp = done;
} else {
/*
* If we made no progress, there must be a good reason.
* EOF is handled explicitly above, before the loop.
*/
ASSERT3S(error, !=, 0);
}
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
return (error);
}
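To make the chunking comment at the top of zfs_clone_range() concrete: with sizeof (blkptr_t) being 128 bytes, the 130816 bytes of log-record space yield the 1022 block pointers mentioned there, and with a 128 KiB recordsize each chunk covers just under 128 MiB. A small self-contained sketch of the arithmetic:

#include <stdio.h>

int
main(void)
{
	/* Per the comment above zfs_clone_range(): 130816 bytes fit. */
	const unsigned long log_data = 130816;
	const unsigned long bp_size = 128;	/* sizeof (blkptr_t) */
	const unsigned long recordsize = 128 * 1024;

	unsigned long maxblocks = log_data / bp_size;	/* 1022 */
	unsigned long chunk = maxblocks * recordsize;	/* bytes/chunk */

	printf("%lu block pointers per record, %lu bytes per chunk\n",
	    maxblocks, chunk);	/* 1022, 133955584 (~127.75 MiB) */
	return (0);
}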
/*
 * The usual pattern would be to call zfs_clone_range() from
 * zfs_replay_clone(), but we cannot do that, because when replaying we
 * don't have the source znode available. This is why we need a dedicated
 * replay function.
*/
int
zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
const blkptr_t *bps, size_t nbps)
{
zfsvfs_t *zfsvfs;
dmu_buf_impl_t *db;
dmu_tx_t *tx;
int error;
int count = 0;
sa_bulk_attr_t bulk[3];
uint64_t mtime[2], ctime[2];
ASSERT3U(off, <, MAXOFFSET_T);
ASSERT3U(len, >, 0);
ASSERT3U(nbps, >, 0);
zfsvfs = ZTOZSB(zp);
ASSERT(spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
SPA_FEATURE_BLOCK_CLONING));
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
ASSERT(zfsvfs->z_replay);
ASSERT(!zfs_is_readonly(zfsvfs));
if ((off % blksz) != 0) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
/*
* Start a transaction.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
zfs_exit(zfsvfs, FTAG);
return (error);
}
if (zp->z_blksz < blksz)
zfs_grow_blocksize(zp, blksz, tx);
dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
if (zp->z_size < off + len)
zp->z_size = off + len;
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
/*
 * zil_replaying() not only checks if we are replaying the ZIL, but
 * also updates the ZIL header to record replay progress.
*/
VERIFY(zil_replaying(zfsvfs->z_log, tx));
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_clone_range);
EXPORT_SYMBOL(zfs_clone_range_replay);
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
"Bytes to read per chunk");
+
+ZFS_MODULE_PARAM(zfs, zfs_, bclone_enabled, INT, ZMOD_RW,
+ "Enable block cloning");
+
+ZFS_MODULE_PARAM(zfs, zfs_, bclone_wait_dirty, INT, ZMOD_RW,
+ "Wait for dirty blocks when cloning");
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index a11886136994..5642f082bdb8 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,4233 +1,4269 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay them after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
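Conceptually, replay walks that chain like a singly linked list: start at the header's block pointer and follow each block's embedded next pointer until a hole ends the chain. A simplified sketch with illustrative in-memory types (the real walk, over on-disk block pointers, is zil_parse() further down in this file):

struct zil_blk {
	struct zil_blk *next;	/* stands in for the embedded blkptr_t */
	/* ... log records ... */
};

/* Count blocks the way zil_parse() conceptually walks them. */
static int
walk_chain(struct zil_blk *head)
{
	int nblocks = 0;

	for (struct zil_blk *b = head; b != NULL; b = b->next) {
		/* validate the block, then process each record in it */
		nblocks++;
	}
	return (nblocks);
}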
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
static uint_t zfs_commit_timeout_pct = 5;
/*
 * Minimal time we care to delay the commit waiting for more ZIL records.
 * The FreeBSD kernel, at least, can't sleep for less than 2us at its best.
 * So requests to sleep for less than 5us are a waste of CPU time with
 * a risk of significant log latency increase due to oversleep.
*/
static uint64_t zil_min_commit_timeout = 5000;
/*
* See zil.h for more information about these fields.
*/
static zil_kstat_values_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};
static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
static int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by single active ZIL writer.
*/
static uint64_t zil_slog_bulk = 64 * 1024 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static itx_t *zil_itx_clone(itx_t *oitx);
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
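zil_bp_tree_add() above is the find-then-insert-at-position pattern: avl_find() both detects a duplicate DVA and, through the where cookie, remembers the insertion point so avl_insert() needs no second search. The same shape over a plain sorted array, as an illustrative stand-in for the AVL tree:

#include <errno.h>
#include <string.h>

/*
 * Insert key into a sorted array iff absent; EEXIST if already there.
 * The caller guarantees capacity for one more element.
 */
static int
dedup_insert(unsigned long *arr, size_t *n, unsigned long key)
{
	size_t lo = 0, hi = *n;

	while (lo < hi) {	/* binary search for the position */
		size_t mid = lo + (hi - lo) / 2;
		if (arr[mid] == key)
			return (EEXIST);
		if (arr[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	/* lo is now the insertion point; shift the tail and insert. */
	memmove(&arr[lo + 1], &arr[lo], (*n - lo) * sizeof (arr[0]));
	arr[lo] = key;
	(*n)++;
	return (0);
}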
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
zil_kstat_values_t *zs = ksp->ks_data;
ASSERT3P(&zil_stats, ==, zs);
if (rw == KSTAT_WRITE) {
return (SET_ERROR(EACCES));
}
zil_kstat_values_update(zs, &zil_sums_global);
return (0);
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
uint64_t size = BP_GET_LSIZE(bp);
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = (*abuf)->b_data;
char *lr = (char *)(zilc + 1);
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) ||
zilc->zc_nused < sizeof (*zilc) ||
zilc->zc_nused > size) {
error = SET_ERROR(ECKSUM);
} else {
*begin = lr;
*end = lr + zilc->zc_nused - sizeof (*zilc);
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = (*abuf)->b_data;
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
*begin = lr;
*end = lr + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
}
return (error);
}
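The chain check above packs a verifier into every link: the next-block pointer embedded in block N must have been stamped with block N's own checksum, sequence word incremented, so a stale or recycled block fails a simple compare. A stripped-down sketch of that comparison (the struct layout is simplified; the word index mirrors ZIL_ZC_SEQ):

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t zc_word[4]; } cksum_t;
#define	ZC_SEQ	3	/* index of the sequence word */

/*
 * Return nonzero iff the checksum embedded in the block's next-block
 * pointer matches the checksum of the pointer we followed to read
 * the block, with the sequence word incremented by one.
 */
static int
chain_link_valid(const cksum_t *followed, const cksum_t *embedded_next)
{
	cksum_t expect = *followed;

	expect.zc_word[ZC_SEQ]++;
	return (memcmp(&expect, embedded_next, sizeof (expect)) == 0);
}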
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
void
zil_sums_init(zil_sums_t *zs)
{
wmsum_init(&zs->zil_commit_count, 0);
wmsum_init(&zs->zil_commit_writer_count, 0);
wmsum_init(&zs->zil_itx_count, 0);
wmsum_init(&zs->zil_itx_indirect_count, 0);
wmsum_init(&zs->zil_itx_indirect_bytes, 0);
wmsum_init(&zs->zil_itx_copied_count, 0);
wmsum_init(&zs->zil_itx_copied_bytes, 0);
wmsum_init(&zs->zil_itx_needcopy_count, 0);
wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}
void
zil_sums_fini(zil_sums_t *zs)
{
wmsum_fini(&zs->zil_commit_count);
wmsum_fini(&zs->zil_commit_writer_count);
wmsum_fini(&zs->zil_itx_count);
wmsum_fini(&zs->zil_itx_indirect_count);
wmsum_fini(&zs->zil_itx_indirect_bytes);
wmsum_fini(&zs->zil_itx_copied_count);
wmsum_fini(&zs->zil_itx_copied_bytes);
wmsum_fini(&zs->zil_itx_needcopy_count);
wmsum_fini(&zs->zil_itx_needcopy_bytes);
wmsum_fini(&zs->zil_itx_metaslab_normal_count);
wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
wmsum_fini(&zs->zil_itx_metaslab_normal_write);
wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
wmsum_fini(&zs->zil_itx_metaslab_slog_count);
wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
wmsum_fini(&zs->zil_itx_metaslab_slog_write);
wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}
void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
zs->zil_commit_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_count);
zs->zil_commit_writer_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_writer_count);
zs->zil_itx_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_count);
zs->zil_itx_indirect_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_count);
zs->zil_itx_indirect_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_bytes);
zs->zil_itx_copied_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_count);
zs->zil_itx_copied_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_bytes);
zs->zil_itx_needcopy_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_count);
zs->zil_itx_needcopy_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
zs->zil_itx_metaslab_normal_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
zs->zil_itx_metaslab_normal_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
zs->zil_itx_metaslab_normal_write.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
zs->zil_itx_metaslab_normal_alloc.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
zs->zil_itx_metaslab_slog_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
zs->zil_itx_metaslab_slog_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
zs->zil_itx_metaslab_slog_write.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
zs->zil_itx_metaslab_slog_alloc.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk = {{{{0}}}};
int error = 0;
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *lrp, *end;
arc_buf_t *abuf = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
&lrp, &end, &abuf);
if (error != 0) {
if (abuf)
arc_buf_destroy(abuf, &abuf);
if (claimed) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS read log block error %d, "
"dataset %s, seq 0x%llx\n", error, name,
(u_longlong_t)blk_seq);
}
break;
}
for (; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
+ ASSERT3U(reclen, <=, end - lrp);
if (lr->lrc_seq > claim_lr_seq) {
arc_buf_destroy(abuf, &abuf);
goto done;
}
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0) {
arc_buf_destroy(abuf, &abuf);
goto done;
}
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
arc_buf_destroy(abuf, &abuf);
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
zil_bp_tree_fini(zilog);
return (error);
}
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
(void) tx;
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
(void) zilog, (void) lrc, (void) tx, (void) first_txg;
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
- ASSERT(lrc->lrc_txtype == TX_WRITE);
+ ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
static int
-zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
+zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
+ uint64_t first_txg)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
- spa_t *spa;
+ spa_t *spa = zilog->zl_spa;
uint_t ii;
- ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
+ ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
+ lr_bps[lr->lr_nbps]));
if (tx == NULL) {
return (0);
}
/*
* XXX: Do we need to byteswap lr?
*/
- spa = zilog->zl_spa;
-
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
/*
- * When data in embedded into BP there is no need to create
- * BRT entry as there is no data block. Just copy the BP as
- * it contains the data.
+ * When data is embedded into the BP there is no need to create
+ * a BRT entry as there is no data block. Just copy the BP as it
+ * contains the data.
*/
- if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
+ if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
+ continue;
+
+ /*
+ * We cannot handle block pointers from the future, since they
+ * are not yet allocated. It should not normally happen, but
+ * just in case let's be safe and stop here now instead of
+ * corrupting the pool.
+ */
+ if (BP_PHYSICAL_BIRTH(bp) >= first_txg)
+ return (SET_ERROR(ENOENT));
+
+ /*
+ * Assert the block is really allocated before we reference it.
+ */
+ metaslab_check_free(spa, bp);
+ }
+
+ for (ii = 0; ii < lr->lr_nbps; ii++) {
+ bp = &lr->lr_bps[ii];
+ if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
brt_pending_add(spa, bp, tx);
- }
}
return (0);
}
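Note the two-pass shape of zil_claim_clone_range() above: the first loop only validates (any block pointer born at or after first_txg aborts with ENOENT before anything is touched), and only once every BP has passed does the second loop register the BRT references, so a failure never leaves partial claim state behind. A generic sketch of that validate-then-apply pattern (the names are illustrative):

#include <errno.h>
#include <stddef.h>

/*
 * Apply an operation to every item, but only after all items have
 * been validated, so a validation failure can never leave partially
 * applied state behind.
 */
static int
validate_then_apply(const int *items, size_t n,
    int (*validate)(int), void (*apply)(int))
{
	for (size_t i = 0; i < n; i++) {
		if (!validate(items[i]))
			return (ENOENT);	/* nothing applied yet */
	}
	for (size_t i = 0; i < n; i++)
		apply(items[i]);
	return (0);
}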
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_claim_write(zilog, lrc, tx, first_txg));
case TX_CLONE_RANGE:
- return (zil_claim_clone_range(zilog, lrc, tx));
+ return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
default:
return (0);
}
}
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
(void) claim_txg;
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
- ASSERT(lrc->lrc_txtype == TX_WRITE);
+ ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
/*
* If we previously claimed it, we need to free it.
*/
if (bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp)) {
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
}
return (0);
}
static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
spa_t *spa;
uint_t ii;
- ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
+ ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
+ ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
+ lr_bps[lr->lr_nbps]));
if (tx == NULL) {
return (0);
}
spa = zilog->zl_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
if (!BP_IS_HOLE(bp)) {
zio_free(spa, dmu_tx_get_txg(tx), bp);
}
}
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
if (claim_txg == 0) {
return (0);
}
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_free_write(zilog, lrc, tx, claim_txg));
case TX_CLONE_RANGE:
return (zil_free_clone_range(zilog, lrc, tx));
default:
return (0);
}
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
/*
* Allocate a new lwb. We may already have a block pointer for it, in which
* case we get size and version from there. Or we may not yet, in which case
* we choose them here and later make the block allocation match.
*/
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
uint64_t txg, lwb_state_t state)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
if (bp) {
lwb->lwb_blk = *bp;
lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
sz = BP_GET_LSIZE(bp);
} else {
BP_ZERO(&lwb->lwb_blk);
lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
SPA_VERSION_SLIM_ZIL);
}
lwb->lwb_slog = slog;
lwb->lwb_error = 0;
if (lwb->lwb_slim) {
lwb->lwb_nmax = sz;
lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
} else {
lwb->lwb_nmax = sz - sizeof (zil_chain_t);
lwb->lwb_nused = lwb->lwb_nfilled = 0;
}
lwb->lwb_sz = sz;
lwb->lwb_state = state;
lwb->lwb_buf = zio_buf_alloc(sz);
lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_issued_timestamp = 0;
lwb->lwb_issued_txg = 0;
lwb->lwb_alloc_txg = txg;
lwb->lwb_max_txg = 0;
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
if (state != LWB_STATE_NEW)
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
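The slim/classic split in zil_alloc_lwb() is only about where the zil_chain_t bookkeeping lives: slim (ZILOG2) blocks keep it as a header at the front, so lwb_nmax covers the whole buffer and accounting starts past the header, while classic blocks reserve it as a trailer at the end. A quick sketch of the usable-space arithmetic (the sizes are illustrative, not the real struct sizes):

#include <stdio.h>

int
main(void)
{
	const unsigned sz = 4096;	/* example lwb buffer size */
	const unsigned hdr = 96;	/* illustrative zil_chain_t size */

	/* Slim: chain header at the front; records follow it. */
	unsigned slim_nmax = sz, slim_nused = hdr;

	/* Classic: chain trailer at the end; records start at 0. */
	unsigned classic_nmax = sz - hdr, classic_nused = 0;

	/* Both layouts leave the same payload space; only the
	 * placement of the bookkeeping differs. */
	printf("slim usable %u, classic usable %u\n",
	    slim_nmax - slim_nused, classic_nmax - classic_nused);
	return (0);
}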
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
ASSERT3P(lwb->lwb_child_zio, ==, NULL);
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
VERIFY(list_is_empty(&lwb->lwb_itxs));
VERIFY(list_is_empty(&lwb->lwb_waiters));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
 * It's called in zil_commit context (zil_process_commit_list()/zil_create()).
 * It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
 * Check dsl_dataset_feature_is_active() to avoid txg_wait_synced() on every
 * zil_commit().
*/
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
!dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(ds, tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t slog = FALSE;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
/*
* If "zilsaxattr" feature is enabled on zpool, then activate
* it now when we're creating the ZIL chain. We can't wait with
* this until we write the first xattr log record because we
* need to wait for the feature activation to sync out.
*/
if (spa_feature_is_enabled(zilog->zl_spa,
SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
DMU_OST_ZVOL) {
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
}
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
} else {
/*
* This branch covers the case where we enable the feature on a
* zpool that has existing ZIL headers.
*/
zil_commit_activate_saxattr_feature(zilog);
}
IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));
ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return (B_FALSE);
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
if (!BP_IS_HOLE(&lwb->lwb_blk))
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
return (B_TRUE);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it can not have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
(void) dp;
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_issuer_lock while the lwb is open and zl_lock otherwise.
* zl_issuer_lock also protects leaving the open state.
* zcw_lwb setting is protected by zl_issuer_lock and state !=
* flush_done, which transition is protected by zl_lock.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock));
IMPLY(lwb->lwb_state != LWB_STATE_OPENED,
MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(&lwb->lwb_waiters, zcw);
ASSERT3P(zcw->zcw_lwb, ==, NULL);
zcw->zcw_lwb = lwb;
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(nolwb, zcw);
ASSERT3P(zcw->zcw_lwb, ==, NULL);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
 * 'lwb' is at a point in its lifetime where lwb_vdev_tree no longer
 * needs the protection of lwb_vdev_lock (it will only be modified
 * while holding zilog->zl_lock), as its writes and those of its
 * children have all completed. The younger 'nlwb' may be waiting on
 * future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
 * This function is called after all vdevs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zil_nocacheflush" is set. Further,
 * all "previous" lwbs will have completed before this function is
 * called; i.e. this function is called for all previous lwbs before
 * it's called for "this" lwb (enforced via the zio dependencies
* configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
zil_commit_waiter_t *zcw;
itx_t *itx;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp;
mutex_enter(&zilog->zl_lock);
zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;
lwb->lwb_root_zio = NULL;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
lwb->lwb_state = LWB_STATE_FLUSH_DONE;
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
zil_itx_destroy(itx);
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
* order for this error handling to work correctly. This
* includes ZIO errors from either this LWB's write or
* flush, as well as any errors from other dependent LWBs
* (e.g. a root LWB ZIO that might be a child of this LWB).
*
* With that said, it's important to note that LWB flush
* errors are not propagated up to the LWB root ZIO.
* This is incorrect behavior, and results in VDEV flush
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
uint64_t txg = lwb->lwb_issued_txg;
/* Once we drop the lock, lwb may be freed by zil_sync(). */
mutex_exit(&zilog->zl_lock);
mutex_enter(&zilog->zl_lwb_io_lock);
ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
zilog->zl_lwb_inflight[txg & TXG_MASK]--;
if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
cv_broadcast(&zilog->zl_lwb_io_cv);
mutex_exit(&zilog->zl_lwb_io_lock);
}
/*
* Wait for the completion of all issued write/flush of that txg provided.
* It guarantees zil_lwb_flush_vdevs_done() is called and returned.
*/
static void
zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
{
ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
mutex_enter(&zilog->zl_lwb_io_lock);
while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lwb_io_lock);
#ifdef ZFS_DEBUG
mutex_enter(&zilog->zl_lock);
mutex_enter(&zilog->zl_lwb_io_lock);
lwb_t *lwb = list_head(&zilog->zl_lwb_list);
while (lwb != NULL) {
if (lwb->lwb_issued_txg <= txg) {
ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
IMPLY(lwb->lwb_issued_txg > 0,
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE,
lwb->lwb_buf == NULL);
lwb = list_next(&zilog->zl_lwb_list, lwb);
}
mutex_exit(&zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lock);
#endif
}
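This wait pairs with the per-txg counter decrement at the end of zil_lwb_flush_vdevs_done(): issuing an lwb bumps zl_lwb_inflight for its txg, the completion path decrements it and broadcasts at zero, and the waiter sleeps on the condvar until then. The same pattern in portable pthreads, as a sketch rather than the kernel primitives:

#include <pthread.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t io_cv = PTHREAD_COND_INITIALIZER;
static unsigned inflight;

static void
io_issue(void)
{
	pthread_mutex_lock(&io_lock);
	inflight++;
	pthread_mutex_unlock(&io_lock);
}

static void
io_done(void)	/* completion callback */
{
	pthread_mutex_lock(&io_lock);
	if (--inflight == 0)
		pthread_cond_broadcast(&io_cv);
	pthread_mutex_unlock(&io_lock);
}

static void
io_wait_all(void)
{
	pthread_mutex_lock(&io_lock);
	while (inflight > 0)	/* re-check after every wakeup */
		pthread_cond_wait(&io_cv, &io_lock);
	pthread_mutex_unlock(&io_lock);
}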
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
abd_free(zio->io_abd);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
lwb->lwb_buf = NULL;
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
/*
	 * If nlwb has not been issued yet, zil_lwb_set_zio_dependency() has
	 * not been called for it, and when it is, it won't be able to make
	 * its write ZIO a parent of this ZIO. In that case we cannot defer
	 * our flushes, or there may be a race between the done callbacks
	 * below.
*/
nlwb = list_next(&zilog->zl_lwb_list, lwb);
if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
nlwb = NULL;
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
*
* Additionally, we don't perform any further error handling at
* this point (e.g. setting "zcw_zio_error" appropriately), as
* we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
* we expect any error seen here, to have been propagated to
* that function).
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
	 * deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL && !vd->vdev_nowritecache) {
/*
* The "ZIO_FLAG_DONT_PROPAGATE" is currently
* always used within "zio_flush". This means,
* any errors when flushing the vdev(s), will
* (unfortunately) not be handled correctly,
* since these "zio_flush" errors will not be
* propagated up to "zil_lwb_flush_vdevs_done".
*/
zio_flush(lwb->lwb_root_zio, vd);
}
kmem_free(zv, sizeof (*zv));
}
}
/*
* Build the zio dependency chain, which is used to preserve the ordering of
* lwb completions that is required by the semantics of the ZIL. Each new lwb
* zio becomes a parent of the previous lwb zio, such that the new lwb's zio
* cannot complete until the previous lwb's zio completes.
*
* This is required by the semantics of zil_commit(): the commit waiters
* attached to the lwbs will be woken in the lwb zio's completion callback,
* so this zio dependency graph ensures the waiters are woken in the correct
* order (the same order the lwbs were created).
*/
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb);
if (prev_lwb == NULL ||
prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE)
return;
/*
* If the previous lwb's write hasn't already completed, we also want
* to order the completion of the lwb write zios (above, we only order
* the completion of the lwb root zios). This is required because of
* how we can defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
* When the DKIOCFLUSHWRITECACHE commands are deferred, the previous
* lwb will rely on this lwb to flush the vdevs written to by that
* previous lwb. Thus, we need to ensure this lwb doesn't issue the
* flush until after the previous lwb's write completes. We ensure
* this ordering by setting the zio parent/child relationship here.
*
* Without this relationship on the lwb's write zio, it's possible
* for this lwb's write to complete prior to the previous lwb's write
* completing; and thus, the vdevs for the previous lwb would be
* flushed prior to that lwb's data being written to those vdevs (the
* vdevs are flushed in the lwb write zio's completion handler,
* zil_lwb_write_done()).
*/
if (prev_lwb->lwb_state == LWB_STATE_ISSUED) {
ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL);
zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio);
} else {
ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
}
ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio);
}
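/*
 * Illustrative sketch, not part of the original source: the ordering that
 * zio_add_child() provides above can be modeled with a pending-children
 * counter, where a parent's completion callback may only run once its own
 * work and all of its children have finished. The "toy_zio_t" type below is
 * hypothetical.
 */
#include <stdio.h>

typedef struct toy_zio toy_zio_t;
struct toy_zio {
	const char	*tz_name;
	int		tz_pending;	/* children not yet done */
	int		tz_self_done;	/* this zio's own work finished */
	toy_zio_t	*tz_parent;
};

static void
toy_zio_check_done(toy_zio_t *z)
{
	if (!z->tz_self_done || z->tz_pending > 0)
		return;
	printf("%s done\n", z->tz_name);	/* completion callback */
	if (z->tz_parent != NULL) {
		z->tz_parent->tz_pending--;
		toy_zio_check_done(z->tz_parent);
	}
}

static void
toy_zio_add_child(toy_zio_t *parent, toy_zio_t *child)
{
	child->tz_parent = parent;
	parent->tz_pending++;
}

static void
toy_zio_finish_self(toy_zio_t *z)
{
	z->tz_self_done = 1;
	toy_zio_check_done(z);
}

int
main(void)
{
	/* As above: the previous lwb's zio is a child of the current one. */
	toy_zio_t prev = { "prev-lwb-root", 0, 0, NULL };
	toy_zio_t cur = { "cur-lwb-root", 0, 0, NULL };

	toy_zio_add_child(&cur, &prev);
	toy_zio_finish_self(&cur);	/* can't complete yet */
	toy_zio_finish_self(&prev);	/* prev completes, then cur */
	return (0);
}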
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. This function is idempotent; if
* the passed in lwb has already been opened, it is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (lwb->lwb_state != LWB_STATE_NEW) {
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
return;
}
mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_OPENED;
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
static const struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
- { 131072, 131072 }, /* < 128KB writes */
- { 131072 +4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */
};
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized. Otherwise this should not be used directly; see
* zl_max_block_size instead.
*/
static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
* Close the log block for being issued and allocate the next one.
* Has to be called under zl_issuer_lock to chain more lwbs.
*/
static lwb_t *
zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
{
int i;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
lwb->lwb_state = LWB_STATE_CLOSED;
/*
	 * If there was an allocation failure, the NULL we return will trigger
	 * zil_commit_writer_stall() in the caller. This is inherently racy,
* since allocation may not have happened yet.
*/
if (lwb->lwb_error != 0)
return (NULL);
/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on the size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum of the newly suggested size and an array of
	 *   previous sizes. This lessens the picket-fence effect of wrongly
	 *   guessing the size if we have a stream of, say, 2k, 64k, 2k, 64k
	 *   requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
uint64_t zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
DTRACE_PROBE3(zil__block__size, zilog_t *, zilog,
uint64_t, zil_blksz,
uint64_t, zilog->zl_prev_blks[zilog->zl_prev_rotor]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
return (zil_alloc_lwb(zilog, zil_blksz, NULL, 0, 0, state));
}
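/*
 * Illustrative sketch, not part of the original source: the block size
 * selection above in one self-contained function. The bucket table and the
 * history rotor are copied in spirit; "HIST" and the variable names are
 * hypothetical, and SPA_OLD_MAXBLOCKSIZE is hard-coded to 128K here.
 */
#include <stdint.h>
#include <stdio.h>

#define	HIST	16			/* like ZIL_PREV_BLKS, power of 2 */

static uint64_t
pick_blksz(uint64_t used, uint64_t *prev, int *rotor)
{
	static const struct {
		uint64_t limit;
		uint64_t blksz;
	} buckets[] = {
		{ 4096,		4096 },
		{ 8192 + 4096,	8192 + 4096 },
		{ 32768 + 4096,	32768 + 4096 },
		{ 65536 + 4096,	65536 + 4096 },
		{ UINT64_MAX,	131072 },	/* SPA_OLD_MAXBLOCKSIZE */
	};
	uint64_t blksz;
	int i;

	/* Smallest bucket whose limit covers the bytes we expect to use. */
	for (i = 0; used > buckets[i].limit; i++)
		continue;
	blksz = buckets[i].blksz;

	/* Smooth with recent history to avoid picket-fence guessing. */
	prev[*rotor] = blksz;
	for (i = 0; i < HIST; i++)
		if (prev[i] > blksz)
			blksz = prev[i];
	*rotor = (*rotor + 1) & (HIST - 1);
	return (blksz);
}

int
main(void)
{
	uint64_t prev[HIST] = { 0 };
	int rotor = 0;

	/* An alternating 2K/64K stream settles on the 64K+4K bucket. */
	for (int i = 0; i < 8; i++)
		printf("%llu\n", (unsigned long long)
		    pick_blksz((i & 1) ? 65536 : 2048, prev, &rotor));
	return (0);
}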
/*
* Finalize previously closed block and issue the write zio.
*/
static void
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
spa_t *spa = zilog->zl_spa;
zil_chain_t *zilc;
boolean_t slog;
zbookmark_phys_t zb;
zio_priority_t prio;
int error;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
/* Actually fill the lwb with the data. */
for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
itx = list_next(&lwb->lwb_itxs, itx))
zil_lwb_commit(zilog, lwb, itx);
lwb->lwb_nused = lwb->lwb_nfilled;
+ ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb,
ZIO_FLAG_CANFAIL);
/*
	 * The lwb is now ready to be issued, but that can happen only if it
	 * already has its block pointer allocated or the allocation has
	 * failed. Otherwise leave it as-is, relying on some other thread to
	 * issue it after allocating its block pointer by calling
	 * zil_lwb_write_issue() for the previous lwb(s) in the chain.
*/
mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_READY;
if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
mutex_exit(&zilog->zl_lock);
return;
}
mutex_exit(&zilog->zl_lock);
next_lwb:
if (lwb->lwb_slim)
zilc = (zil_chain_t *)lwb->lwb_buf;
else
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
int wsz = lwb->lwb_sz;
if (lwb->lwb_error == 0) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
&lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
lwb, prio, ZIO_FLAG_CANFAIL, &zb);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
if (lwb->lwb_slim) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
int);
ASSERT3S(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
wsz = lwb->lwb_write_zio->io_size;
}
memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
} else {
/*
* We can't write the lwb if there was an allocation failure,
* so create a null zio instead just to maintain dependencies.
*/
lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL,
zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL);
lwb->lwb_write_zio->io_error = lwb->lwb_error;
}
if (lwb->lwb_child_zio)
zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio);
/*
* Open transaction to allocate the next block pointer.
*/
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
uint64_t txg = dmu_tx_get_txg(tx);
/*
	 * Allocate the next block pointer unless we are already in error.
*/
lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb);
blkptr_t *bp = &zilc->zc_next_blk;
BP_ZERO(bp);
error = lwb->lwb_error;
if (error == 0) {
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
&slog);
}
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
ZIO_CHECKSUM_ZILOG);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
}
/*
	 * Reduce TXG open time by incrementing the inflight counter and
	 * committing the transaction. zil_sync() will wait for it to return
	 * to zero.
*/
mutex_enter(&zilog->zl_lwb_io_lock);
lwb->lwb_issued_txg = txg;
zilog->zl_lwb_inflight[txg & TXG_MASK]++;
zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
mutex_exit(&zilog->zl_lwb_io_lock);
dmu_tx_commit(tx);
spa_config_enter(spa, SCL_STATE, lwb, RW_READER);
/*
* We've completed all potentially blocking operations. Update the
	 * nlwb and allow it to proceed without possible lock-order reversals.
*/
mutex_enter(&zilog->zl_lock);
zil_lwb_set_zio_dependency(zilog, lwb);
lwb->lwb_state = LWB_STATE_ISSUED;
if (nlwb) {
nlwb->lwb_blk = *bp;
nlwb->lwb_error = error;
nlwb->lwb_slog = slog;
nlwb->lwb_alloc_txg = txg;
if (nlwb->lwb_state != LWB_STATE_READY)
nlwb = NULL;
}
mutex_exit(&zilog->zl_lock);
if (lwb->lwb_slog) {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
lwb->lwb_nused);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
wsz);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
BP_GET_LSIZE(&lwb->lwb_blk));
} else {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
lwb->lwb_nused);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
wsz);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
BP_GET_LSIZE(&lwb->lwb_blk));
}
lwb->lwb_issued_timestamp = gethrtime();
if (lwb->lwb_child_zio)
zio_nowait(lwb->lwb_child_zio);
zio_nowait(lwb->lwb_write_zio);
zio_nowait(lwb->lwb_root_zio);
/*
* If nlwb was ready when we gave it the block pointer,
	 * it is on us to issue it and possibly the following ones.
*/
lwb = nlwb;
if (lwb)
goto next_lwb;
}
/*
* Maximum amount of data that can be put into single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog, size_t hdrsize)
{
return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
}
/*
* Maximum amount of log space we agree to waste to reduce number of
* WR_NEED_COPY chunks to reduce zl_get_data() overhead (~6%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
 * single log block. Below that it is a tradeoff between an additional
 * memory copy (and possibly worse log space efficiency) and an additional
 * range lock/unlock.
*/
static uint_t zil_maxcopied = 7680;
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t));
return (MIN(max_data, zil_maxcopied));
}
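/*
 * Illustrative sketch, not part of the original source: the three limits
 * above computed for a hypothetical 128K maximum block size. The values for
 * sizeof (zil_chain_t) and sizeof (lr_write_t) are stand-ins chosen purely
 * for the arithmetic; both structures are fixed-size headers.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t max_block = 131072;	/* zl_max_block_size */
	uint64_t zil_chain = 96;	/* stand-in for sizeof (zil_chain_t) */
	uint64_t lr_write = 192;	/* stand-in for sizeof (lr_write_t) */

	/* zil_max_log_data(): usable payload after the two headers. */
	uint64_t max_log_data = max_block - zil_chain - lr_write;

	/* zil_max_waste_space(): up to 1/16 of that may go unused. */
	uint64_t max_waste = max_log_data / 16;

	/* zil_max_copied_data(): capped by the zil_maxcopied tunable. */
	uint64_t maxcopied = 7680;
	uint64_t max_copied = max_log_data < maxcopied ?
	    max_log_data : maxcopied;

	printf("log data %llu, waste %llu, copied %llu\n",
	    (unsigned long long)max_log_data, (unsigned long long)max_waste,
	    (unsigned long long)max_copied);
	return (0);
}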
/*
* Estimate space needed in the lwb for the itx. Allocate more lwbs or
* split the itx as needed, but don't touch the actual transaction data.
* Has to be called under zl_issuer_lock to call zil_lwb_write_close()
* to chain more lwbs.
*/
static lwb_t *
zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
{
itx_t *citx;
lr_t *lr, *clr;
lr_write_t *lrw;
uint64_t dlen, dnow, lwb_sp, reclen, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lr = &itx->itx_lr;
lrw = (lr_write_t *)lr;
/*
* A commit itx doesn't represent any on-disk state; instead
	 * it's simply used as a placeholder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lr->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
list_insert_tail(&lwb->lwb_itxs, itx);
return (lwb);
}
+ reclen = lr->lrc_reclen;
if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
+ ASSERT3U(reclen, ==, sizeof (lr_write_t));
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
} else {
+ ASSERT3U(reclen, >=, sizeof (lr_t));
dlen = 0;
}
- reclen = lr->lrc_reclen;
+ ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0));
zilog->zl_cur_used += (reclen + dlen);
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
list_insert_tail(ilwbs, lwb);
lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
if (lwb == NULL)
return (NULL);
lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
-
- /*
- * There must be enough space in the new, empty log block to
- * hold reclen. For WR_COPIED, we need to fit the whole
- * record in one block, and reclen is the header size + the
- * data size. For WR_NEED_COPY, we can create multiple
- * records, splitting the data into multiple blocks, so we
- * only need to fit one word of data per block; in this case
- * reclen is just the header size (no data).
- */
- ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
+ /*
+ * There must be enough space in the log block to hold reclen.
+ * For WR_COPIED, we need to fit the whole record in one block,
+ * and reclen is the write record header size + the data size.
+ * For WR_NEED_COPY, we can create multiple records, splitting
+ * the data into multiple blocks, so we only need to fit one
+ * word of data per block; in this case reclen is just the header
+ * size (no data).
+ */
+ ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
+
dnow = MIN(dlen, lwb_sp - reclen);
if (dlen > dnow) {
ASSERT3U(lr->lrc_txtype, ==, TX_WRITE);
ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY);
citx = zil_itx_clone(itx);
clr = &citx->itx_lr;
lr_write_t *clrw = (lr_write_t *)clr;
clrw->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
} else {
citx = itx;
clr = lr;
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
clr->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
zil_lwb_add_txg(lwb, lr->lrc_txg);
list_insert_tail(&lwb->lwb_itxs, citx);
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
if (lr->lrc_txtype == TX_WRITE &&
lr->lrc_txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
return (lwb);
}
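/*
 * Illustrative sketch, not part of the original source: the WR_NEED_COPY
 * splitting loop above, reduced to its arithmetic. Each chunk consumes a
 * record header (reclen) plus dnow bytes of data; when a block can no
 * longer hold a header plus one word of data, a new block is opened. All
 * names are hypothetical and alignment roundup is ignored.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t reclen = 192;		/* stand-in for sizeof (lr_write_t) */
	uint64_t dlen = 300000;		/* data left to log */
	uint64_t nmax = 131072 - 96;	/* usable bytes per log block */
	uint64_t nused = 0;		/* bytes used in the current block */

	while (dlen > 0) {
		uint64_t sp = nmax - nused;

		if (reclen + sizeof (uint64_t) > sp) {
			nused = 0;	/* "close" the block, open a new one */
			sp = nmax;
		}
		uint64_t dnow = dlen < sp - reclen ? dlen : sp - reclen;
		nused += reclen + dnow;
		dlen -= dnow;
		printf("chunk %llu bytes\n", (unsigned long long)dnow);
	}
	return (0);
}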
/*
* Fill the actual transaction data into the lwb, following zil_lwb_assign().
* Does not require locking.
*/
static void
zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
{
lr_t *lr, *lrb;
lr_write_t *lrw, *lrwb;
char *lr_buf;
uint64_t dlen, reclen;
lr = &itx->itx_lr;
lrw = (lr_write_t *)lr;
if (lr->lrc_txtype == TX_COMMIT)
return;
if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
} else {
dlen = 0;
}
reclen = lr->lrc_reclen;
ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled);
lr_buf = lwb->lwb_buf + lwb->lwb_nfilled;
memcpy(lr_buf, lr, reclen);
lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */
lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zilog, zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lr->lrc_txtype == TX_WRITE) {
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrb->lrc_reclen += dlen;
ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
dlen);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
lrw->lr_length);
if (lwb->lwb_child_zio == NULL) {
lwb->lwb_child_zio = zio_root(
zilog->zl_spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
}
/*
* The "lwb_child_zio" we pass in will become a child of
* "lwb_write_zio", when one is created, so one will be
* a parent of any zio's created by the "zl_get_data".
* This way "lwb_write_zio" will first wait for children
* block pointers before own writing, and then for their
* writing completion before the vdev cache flushing.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
lwb->lwb_child_zio);
if (dbuf != NULL && error == 0) {
/* Zero any padding bytes in the last block. */
memset((char *)dbuf + lrwb->lr_length, 0,
dlen - lrwb->lr_length);
}
/*
* Typically, the only return values we should see from
* ->zl_get_data() are 0, EIO, ENOENT, EEXIST or
* EALREADY. However, it is also possible to see other
* error values such as ENOSPC or EINVAL from
* dmu_read() -> dnode_hold() -> dnode_hold_impl() or
* ENXIO as well as a multitude of others from the
* block layer through dmu_buf_hold() -> dbuf_read()
* -> zio_wait(), as well as through dmu_read() ->
* dnode_hold() -> dnode_hold_impl() -> dbuf_read() ->
* zio_wait(). When these errors happen, we can assume
* that neither an immediate write nor an indirect
* write occurred, so we need to fall back to
* txg_wait_synced(). This is unusual, so we print to
* dmesg whenever one of these errors occurs.
*/
switch (error) {
case 0:
break;
default:
cmn_err(CE_WARN, "zil_lwb_commit() received "
"unexpected error %d from ->zl_get_data()"
". Falling back to txg_wait_synced().",
error);
zfs_fallthrough;
case EIO:
txg_wait_synced(zilog->zl_dmu_pool,
lr->lrc_txg);
zfs_fallthrough;
case ENOENT:
zfs_fallthrough;
case EEXIST:
zfs_fallthrough;
case EALREADY:
return;
}
}
}
lwb->lwb_nfilled += reclen + dlen;
ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
}
itx_t *
zil_itx_create(uint64_t txtype, size_t olrsize)
{
size_t itxsize, lrsize;
itx_t *itx;
+ ASSERT3U(olrsize, >=, sizeof (lr_t));
lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
+ ASSERT3U(lrsize, >=, olrsize);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
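/*
 * Illustrative sketch, not part of the original source: the allocation
 * pattern of zil_itx_create() in miniature. The record size is rounded up
 * to 8 bytes, the header and the in-line record are allocated in one shot
 * via offsetof(), and the roundup tail is zeroed. The "toy_" types are
 * hypothetical stand-ins.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define	ROUNDUP8(x)	(((x) + 7) & ~(size_t)7)

typedef struct toy_lr {
	uint64_t	lrc_txtype;
	uint64_t	lrc_reclen;
} toy_lr_t;

typedef struct toy_itx {
	int		itx_sync;
	size_t		itx_size;
	toy_lr_t	itx_lr;		/* record body follows in-line */
} toy_itx_t;

static toy_itx_t *
toy_itx_create(uint64_t txtype, size_t olrsize)
{
	assert(olrsize >= sizeof (toy_lr_t));
	size_t lrsize = ROUNDUP8(olrsize);
	size_t itxsize = offsetof(toy_itx_t, itx_lr) + lrsize;
	toy_itx_t *itx = malloc(itxsize);

	if (itx == NULL)
		return (NULL);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	/* Zero only the padding introduced by the roundup. */
	memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
	itx->itx_sync = 1;
	itx->itx_size = itxsize;
	return (itx);
}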
static itx_t *
zil_itx_clone(itx_t *oitx)
{
+ ASSERT3U(oitx->itx_size, >=, sizeof (itx_t));
+ ASSERT3U(oitx->itx_size, ==,
+ offsetof(itx_t, itx_lr) + oitx->itx_lr.lrc_reclen);
+
itx_t *itx = zio_data_buf_alloc(oitx->itx_size);
memcpy(itx, oitx, oitx->itx_size);
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
+ ASSERT3U(itx->itx_size, >=, sizeof (itx_t));
+ ASSERT3U(itx->itx_lr.lrc_reclen, ==,
+ itx->itx_size - offsetof(itx_t, itx_lr));
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_remove_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_assign(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_remove_head(list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
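/*
 * Illustrative sketch, not part of the original source: why the "ian_search"
 * stack node is used with avl_find() below. The comparator above is handed
 * two node pointers, so the search key must itself look like a node with the
 * key field filled in. The same pattern with libc bsearch(); the "toy_"
 * names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_node {
	uint64_t	ia_foid;	/* the key */
	void		*ia_list;	/* payload, irrelevant to compare */
} toy_node_t;

static int
toy_compare(const void *x1, const void *x2)
{
	uint64_t o1 = ((const toy_node_t *)x1)->ia_foid;
	uint64_t o2 = ((const toy_node_t *)x2)->ia_foid;

	return (o1 < o2 ? -1 : (o1 > o2 ? 1 : 0));
}

int
main(void)
{
	toy_node_t nodes[] = {
		{ 3, NULL }, { 17, NULL }, { 42, NULL },
	};
	toy_node_t search = { .ia_foid = 17 };	/* only the key matters */
	toy_node_t *found = bsearch(&search, nodes, 3, sizeof (toy_node_t),
	    toy_compare);

	printf("found foid %llu\n",
	    (unsigned long long)(found ? found->ia_foid : 0));
	return (0);
}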
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
- itx_async_node_t *ian;
+ itx_async_node_t *ian, ian_search;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
- ian = avl_find(t, &oid, &where);
+ ian_search.ia_foid = oid;
+ ian = avl_find(t, &ian_search, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_remove_head(&clean_list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
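/*
 * Illustrative sketch, not part of the original source: the "detach under
 * the lock, free after dropping it" pattern used above for the stale itxs.
 * Doing the potentially slow cleanup outside the mutex keeps the critical
 * section short. The "slot_t" type and names are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct slot {
	pthread_mutex_t	s_lock;
	void		*s_data;	/* protected payload */
} slot_t;

static void *
slot_replace(slot_t *s, void *new_data)
{
	void *stale;

	pthread_mutex_lock(&s->s_lock);
	stale = s->s_data;		/* detach under the lock... */
	s->s_data = new_data;
	pthread_mutex_unlock(&s->s_lock);
	return (stale);			/* ...caller frees after dropping it */
}

static void
slot_update(slot_t *s, void *new_data)
{
	void *stale = slot_replace(s, new_data);

	if (stale != NULL)
		free(stale);		/* slow work outside the mutex */
}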
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
 * have written out the uberblocks (i.e. the txg has been committed) so
 * that we don't inadvertently clean out in-memory log records that would
 * be required by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs but
* if taskq_dispatch can't allocate resources to do that then
	 * free them in-line. This should be rare. Note that using TQ_SLEEP
	 * created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
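/*
 * Illustrative sketch, not part of the original source: the
 * dispatch-or-run-inline fallback used by zil_clean() above, expressed with
 * POSIX threads. If the asynchronous worker can't be started, the cleanup
 * still happens, just synchronously. Names are hypothetical.
 */
#include <pthread.h>

static void
itxs_clean(void *arg)
{
	/* free the detached itxs; body elided in this sketch */
	(void) arg;
}

static void *
itxs_clean_thread(void *arg)
{
	itxs_clean(arg);
	return (NULL);
}

static void
dispatch_clean(void *clean_me)
{
	pthread_t tid;

	/* Prefer async cleanup; if thread creation fails, do it in-line. */
	if (pthread_create(&tid, NULL, itxs_clean_thread, clean_me) != 0)
		itxs_clean(clean_me);
	else
		pthread_detach(tid);
}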
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static uint64_t
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg, wtxg = 0;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_t *sync_list = &itxg->itxg_itxs->i_sync_list;
if (unlikely(zilog->zl_suspend > 0)) {
/*
* ZIL was just suspended, but we lost the race.
* Allow all earlier itxs to be committed, but ask
			 * caller to do txg_wait_synced(txg) for any new ones.
*/
if (!list_is_empty(sync_list))
wtxg = MAX(wtxg, txg);
} else {
list_move_tail(commit_list, sync_list);
}
mutex_exit(&itxg->itxg_lock);
}
return (wtxg);
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
- itx_async_node_t *ian;
+ itx_async_node_t *ian, ian_search;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
- ian = avl_find(t, &foid, &where);
+ ian_search.ia_foid = foid;
+ ian = avl_find(t, &ian_search, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
	 * Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb, *plwb;
itx_t *itx;
boolean_t first = B_TRUE;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_is_empty(&zilog->zl_itx_commit_list))
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
/*
* Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will
* have already been created (zl_lwb_list not empty).
*/
zil_commit_activate_saxattr_feature(zilog);
ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
lwb->lwb_state == LWB_STATE_OPENED);
first = (lwb->lwb_state == LWB_STATE_NEW) &&
((plwb = list_prev(&zilog->zl_lwb_list, lwb)) == NULL ||
plwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
		 * itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
		 * To prevent the above scenario from occurring (ensuring
		 * that once an lwb is OPENED it will transition to ISSUED
		 * and eventually DONE), we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
if (lwb == NULL) {
list_insert_tail(&nolwb_itxs, itx);
} else if ((zcw->zcw_lwb != NULL &&
zcw->zcw_lwb != lwb) || zcw->zcw_done) {
/*
					 * Our lwb is done; leave the rest of
					 * the itx list to somebody else who
					 * cares.
*/
first = B_FALSE;
break;
}
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
while ((lwb = list_remove_head(ilwbs)) != NULL)
zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
zil_commit_waiter_skip(zcw);
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
zil_itx_destroy(itx);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
lwb->lwb_state == LWB_STATE_OPENED);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in "new" or "opened" state.
*
* If it's "new", then no itxs have been committed to it, so
* there's no point in issuing its zio (i.e. it's "empty").
*
* If it's "opened", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring on
* the system, such that this function will be immediately
		 *    called again by a different thread and this lwb will be
* closed by zil_lwb_assign(). This way, the lwb will be
* "full" when it is issued to disk, and we'll make use of
		 *    the lwb's size as best we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, zil_commit_waiter() will close it and issue
* the zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, and means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*
		 * If there were no already running or open LWBs, the
		 * workload may be single-threaded. And if the ZIL write
		 * latency is very small or the LWB is almost full, it
		 * may be cheaper to bypass the delay.
*/
if (lwb->lwb_state == LWB_STATE_OPENED && first) {
hrtime_t sleep = zilog->zl_last_lwb_latency *
zfs_commit_timeout_pct / 100;
if (sleep < zil_min_commit_timeout ||
lwb->lwb_nmax - lwb->lwb_nused <
lwb->lwb_nmax / 8) {
list_insert_tail(ilwbs, lwb);
lwb = zil_lwb_write_close(zilog, lwb,
LWB_STATE_NEW);
zilog->zl_cur_used = 0;
if (lwb == NULL) {
while ((lwb = list_remove_head(ilwbs))
!= NULL)
zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
}
}
}
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is the passed in waiter's
 * commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static uint64_t
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
list_t ilwbs;
lwb_t *lwb;
uint64_t wtxg = 0;
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce CPU spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zilog, zil_commit_writer_count);
wtxg = zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog, zcw, &ilwbs);
out:
mutex_exit(&zilog->zl_issuer_lock);
while ((lwb = list_remove_head(&ilwbs)) != NULL)
zil_lwb_write_issue(zilog, lwb);
list_destroy(&ilwbs);
return (wtxg);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state != LWB_STATE_OPENED)
return;
/*
* In order to call zil_lwb_write_close() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done) {
mutex_exit(&zilog->zl_issuer_lock);
return;
}
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb
* _can_ transition from CLOSED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the CLOSED, ISSUED or DONE states.
*
	 * The important thing is that we treat the lwb differently depending on
* if it's OPENED or CLOSED, and block any other threads that might
* attempt to close/issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
* zil_lwb_write_close() if the lwb had already been closed/issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state != LWB_STATE_OPENED) {
mutex_exit(&zilog->zl_issuer_lock);
return;
}
/*
	 * We do not need zcw_lock once we hold zl_issuer_lock and know the
	 * lwb is still open. But we have to drop it to avoid a deadlock in
	 * case a callback of a zio issued by zil_lwb_write_issue() tries to
	 * take it while zil_lwb_write_issue() is blocked attempting to issue
	 * the next lwb it found in the LWB_STATE_READY state.
*/
mutex_exit(&zcw->zcw_lock);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_close() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*/
zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
mutex_exit(&zilog->zl_issuer_lock);
} else {
mutex_exit(&zilog->zl_issuer_lock);
zil_lwb_write_issue(zilog, lwb);
}
mutex_enter(&zcw->zcw_lock);
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
* function. If this does not occur, this secondary responsibility will
 * ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where it's "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so it's "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_READY ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
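/*
 * Illustrative sketch, not part of the original source: the
 * wait-with-absolute-deadline loop above (cv_timedwait_hires() with
 * CALLOUT_FLAG_ABSOLUTE) mapped onto pthread_cond_timedwait(), which also
 * takes an absolute time. On timeout the caller gets one chance to act (a
 * hypothetical "issue_fn" callback here) and then keeps waiting untimed.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct waiter {
	pthread_mutex_t	w_lock;
	pthread_cond_t	w_cv;
	bool		w_done;
} waiter_t;

static void
waiter_wait(waiter_t *w, const struct timespec *deadline,
    void (*issue_fn)(waiter_t *))
{
	bool timedout = false;

	pthread_mutex_lock(&w->w_lock);
	while (!w->w_done) {
		if (!timedout) {
			int rc = pthread_cond_timedwait(&w->w_cv, &w->w_lock,
			    deadline);
			if (rc != ETIMEDOUT || w->w_done)
				continue;
			timedout = true;
			issue_fn(w);	/* e.g. issue the still-open lwb */
		} else {
			/* Already acted on the timeout; wait untimed now. */
			pthread_cond_wait(&w->w_cv, &w->w_lock);
		}
	}
	pthread_mutex_unlock(&w->w_lock);
}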
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once a lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage, until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there are
* checks in the code that enforce this invariant, and they will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
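/*
 * Illustrative standalone sketch (editorial, not part of zil.c): the
 * commit-waiter pattern described in the block comment above, reduced
 * to a minimal POSIX threads program. A waiter blocks on a condition
 * variable until a worker thread -- standing in for the lwb zio
 * completion callback -- marks it done and signals it. All names here
 * are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef struct commit_waiter {
	pthread_mutex_t cw_lock;
	pthread_cond_t cw_cv;
	bool cw_done;
} commit_waiter_t;

static void *
completion_cb(void *arg)	/* plays the role of the zio done callback */
{
	commit_waiter_t *cw = arg;

	sleep(1);		/* pretend the lwb write + flush take time */
	pthread_mutex_lock(&cw->cw_lock);
	cw->cw_done = true;	/* mark the waiter done ... */
	pthread_cond_broadcast(&cw->cw_cv);	/* ... and signal it */
	pthread_mutex_unlock(&cw->cw_lock);
	return (NULL);
}

int
main(void)
{
	commit_waiter_t cw = { PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER, false };
	pthread_t tid;

	pthread_create(&tid, NULL, completion_cb, &cw);
	pthread_mutex_lock(&cw.cw_lock);
	while (!cw.cw_done)	/* zil_commit() waits on its CV like this */
		pthread_cond_wait(&cw.cw_cv, &cw.cw_lock);
	pthread_mutex_unlock(&cw.cw_lock);
	pthread_join(tid, NULL);
	printf("waiter signaled; commit considered stable\n");
	return (0);
}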
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zilog, zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit() returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signaled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
uint64_t wtxg = zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fall back to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is that this is an
* exceptional case that shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
} else if (wtxg != 0) {
txg_wait_synced(zilog->zl_dmu_pool, wtxg);
}
zil_free_commit_waiter(zcw);
}
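/*
 * Illustrative standalone sketch (editorial, not part of zil.c): the
 * "async to sync" promotion performed by zil_async_to_sync() above,
 * reduced to moving the entries for one object id from an async list
 * onto the tail of a sync list while preserving their relative order.
 * All names here are hypothetical.
 */
#include <stdio.h>

typedef struct itx {
	unsigned long it_foid;	/* object the record applies to */
	struct itx *it_next;
} itx_t;

/* Move all entries matching foid from *asyncp to the tail of *syncp. */
static void
promote_async(itx_t **asyncp, itx_t **syncp, unsigned long foid)
{
	itx_t **tail = syncp;

	while (*tail != NULL)		/* find the sync list's tail */
		tail = &(*tail)->it_next;
	for (itx_t **pp = asyncp; *pp != NULL; ) {
		itx_t *it = *pp;

		if (it->it_foid != foid) {
			pp = &it->it_next;
			continue;
		}
		*pp = it->it_next;	/* unlink from async ... */
		it->it_next = NULL;
		*tail = it;		/* ... append to sync, order kept */
		tail = &it->it_next;
	}
}

int
main(void)
{
	itx_t c = { 7, NULL };
	itx_t b = { 3, &c };
	itx_t a = { 7, &b };
	itx_t *async = &a, *sync = NULL;

	promote_async(&async, &sync, 7);
	for (itx_t *it = sync; it != NULL; it = it->it_next)
		printf("sync itx for foid %lu\n", it->it_foid);
	return (0);
}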
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
zil_lwb_flush_wait_all(zilog, txg);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
memset(zh, 0, sizeof (zil_header_t));
memset(zilog->zl_replayed_seq, 0,
sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
} else {
/*
* A destroyed ZIL chain can't contain any TX_SETSAXATTR
* records. So, deactivate the feature for this dataset.
* We activate it again when we start a new ZIL chain.
*/
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_ZILSAXATTR))
dsl_dataset_deactivate_feature(ds,
SPA_FEATURE_ZILSAXATTR, tx);
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
if (!BP_IS_HOLE(&lwb->lwb_blk))
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list, then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_is_empty(&zilog->zl_lwb_list))
BP_ZERO(&zh->zh_log);
}
mutex_exit(&zilog->zl_lock);
}
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
static void
zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_sums_init(&zil_sums_global);
zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_kstats_global != NULL) {
zil_kstats_global->ks_data = &zil_stats;
zil_kstats_global->ks_update = zil_kstats_global_update;
zil_kstats_global->ks_private = NULL;
kstat_install(zil_kstats_global);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_kstats_global != NULL) {
kstat_delete(zil_kstats_global);
zil_kstats_global = NULL;
}
zil_sums_fini(&zil_sums_global);
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
mutex_destroy(&zilog->zl_lwb_io_lock);
cv_destroy(&zilog->zl_cv_suspend);
cv_destroy(&zilog->zl_lwb_io_cv);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
zilog->zl_sums = zil_sums;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
txg = zilog->zl_dirty_max_txg;
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb != NULL) {
txg = MAX(txg, lwb->lwb_alloc_txg);
txg = MAX(txg, lwb->lwb_max_txg);
}
mutex_exit(&zilog->zl_lock);
/*
* zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
* on the time when the dmu_tx transaction is assigned in
* zil_lwb_write_issue().
*/
mutex_enter(&zilog->zl_lwb_io_lock);
txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
mutex_exit(&zilog->zl_lwb_io_lock);
/*
* We need to use txg_wait_synced() to wait until that txg is synced.
* zil_sync() will guarantee all lwbs up to that txg have been
* written out, flushed, and cleaned.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_remove_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static const char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* If cookiep == NULL, this does both the suspend and resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
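/*
 * Illustrative standalone sketch (editorial, not part of zil.c): the
 * cookiep contract described above zil_suspend(), reduced to a plain
 * suspend count. With a NULL cookiep the call suspends and resumes
 * internally; otherwise it returns with the count held and the cookie
 * is later handed to resume. All names here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

typedef struct log {
	int l_suspend;
} log_t;

static void
log_resume(void *cookie)
{
	log_t *l = cookie;

	assert(l->l_suspend > 0);
	l->l_suspend--;
}

static int
log_suspend(log_t *l, void **cookiep)
{
	l->l_suspend++;
	/* ... quiescing and flushing work would happen here ... */
	if (cookiep == NULL)
		log_resume(l);	/* suspend & resume in one call */
	else
		*cookiep = l;	/* caller must later call log_resume() */
	return (0);
}

int
main(void)
{
	log_t l = { 0 };
	void *cookie;

	(void) log_suspend(&l, NULL);	/* transient: count back to 0 */
	(void) log_suspend(&l, &cookie);	/* held: count stays 1 */
	printf("suspend count while held: %d\n", l.l_suspend);
	log_resume(cookie);
	printf("suspend count after resume: %d\n", l.l_suspend);
	return (0);
}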
typedef struct zil_replay_arg {
zil_replay_func_t *const *zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
memcpy(zr->zr_lr, lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error, we try syncing out
* any removes and then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
(void) bp, (void) arg, (void) claim_txg;
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_replay(objset_t *os, void *arg,
zil_replay_func_t *const replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
return (zil_destroy(zilog, B_TRUE));
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
return (B_TRUE);
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
int
zil_reset(const char *osname, void *arg)
{
(void) arg;
int error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
EXPORT_SYMBOL(zil_sums_init);
EXPORT_SYMBOL(zil_sums_fini);
EXPORT_SYMBOL(zil_kstat_values_update);
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, min_commit_timeout, U64, ZMOD_RW,
"Minimum delay we care for ZIL block commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
"Limit in bytes WR_COPIED size");
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 3b3b40fa73d8..d8eb075eef54 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5169 +1,5222 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *const zio_type_name[ZIO_TYPES] = {
/*
* Note: the Linux kernel thread name length is limited,
* so these names will differ from upstream OpenZFS.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off, a
* lot of blocks change size and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128KB allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;
/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;
/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
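/*
 * Illustrative standalone sketch (editorial, not part of zio.c): how
 * pass-number tunables like the ones above are typically consulted.
 * Given the current sync pass, each policy is a simple threshold
 * comparison. The function names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int sync_pass_deferred_free = 2;
static unsigned int sync_pass_dont_compress = 8;

static bool
defer_frees(unsigned int pass)
{
	return (pass >= sync_pass_deferred_free);
}

static bool
compress_ok(unsigned int pass)
{
	return (pass < sync_pass_dont_compress);
}

int
main(void)
{
	for (unsigned int pass = 1; pass <= 9; pass++)
		printf("pass %u: defer frees=%d compress=%d\n",
		    pass, defer_frees(pass), compress_ok(pass));
	return (0);
}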
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Enable smaller (crash dump) cores by also excluding
* metadata allocations.
*/
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t data_cflags, cflags;
data_cflags = KMC_NODEBUG;
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name,
size, align, NULL, NULL, NULL, NULL, NULL,
cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, data_cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
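/*
 * Illustrative standalone sketch (editorial, not part of zio.c): the
 * size-class arithmetic used in zio_init() above. An allocation size
 * maps to a cache index by rounding up to a multiple of
 * SPA_MINBLOCKSIZE, and the power-of-two floor (p2 &= p2 - 1 until p2
 * is a power of two) drives the quarter-power-of-2 alignment decision.
 * The shift mirrors SPA_MINBLOCKSHIFT; everything else is hypothetical.
 */
#include <stdio.h>

#define	MINBLOCKSHIFT	9	/* 512-byte units, as SPA_MINBLOCKSHIFT */
#define	ISP2(x)		(((x) & ((x) - 1)) == 0)

int
main(void)
{
	size_t sizes[] = { 512, 4096, 5120, 12288, 131072 };

	for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		size_t size = sizes[i];
		size_t c = (size - 1) >> MINBLOCKSHIFT;	/* cache index */
		size_t p2 = size;

		while (!ISP2(p2))	/* largest power of 2 <= size */
			p2 &= p2 - 1;
		printf("size %6zu -> cache index %4zu, p2 floor %6zu, "
		    "quarter %5zu\n", size, c, p2, p2 >> 2);
	}
	return (0);
}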
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
+#ifdef ZFS_DEBUG
+static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
+#endif
+
+/*
+ * Use empty space after the buffer to detect overflows.
+ *
+ * Since zio_init() creates kmem caches only for certain set of buffer sizes,
+ * allocations of different sizes may have some unused space after the data.
+ * Filling part of that space with a known pattern on allocation and checking
+ * it on free should allow us to detect some buffer overflows.
+ */
+static void
+zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
+{
+#ifdef ZFS_DEBUG
+ size_t off = P2ROUNDUP(size, sizeof (ulong_t));
+ ulong_t *canary = p + off / sizeof (ulong_t);
+ size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
+ cache[c] == cache[c + 1])
+ asize = (c + 2) << SPA_MINBLOCKSHIFT;
+ for (; off < asize; canary++, off += sizeof (ulong_t))
+ *canary = zio_buf_canary;
+#endif
+}
+
+static void
+zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
+{
+#ifdef ZFS_DEBUG
+ size_t off = P2ROUNDUP(size, sizeof (ulong_t));
+ ulong_t *canary = p + off / sizeof (ulong_t);
+ size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
+ cache[c] == cache[c + 1])
+ asize = (c + 2) << SPA_MINBLOCKSHIFT;
+ for (; off < asize; canary++, off += sizeof (ulong_t)) {
+ if (unlikely(*canary != zio_buf_canary)) {
+ PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
+ p, size, (canary - p) * sizeof (ulong_t),
+ *canary, zio_buf_canary);
+ }
+ }
+#endif
+}
+
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
- return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
+ void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
+ zio_buf_put_canary(p, size, zio_buf_cache, c);
+ return (p);
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
* of kernel heap dumped to disk when the kernel panics.)
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
- return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
+ void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
+ zio_buf_put_canary(p, size, zio_data_buf_cache, c);
+ return (p);
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
+ zio_buf_check_canary(buf, size, zio_buf_cache, c);
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+ zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
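/*
 * Illustrative standalone sketch (editorial, not part of zio.c): the
 * tail-canary overflow check added above, as a userland allocator
 * wrapper. The slack between the requested size and the rounded-up
 * slab size is filled with a pattern on alloc and verified on free.
 * The names and the rounding rule here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	SLAB	64	/* pretend slab granularity */
#define	CANARY	0xdeadc0dedead210bUL

static void *
canary_alloc(size_t size)
{
	size_t asize = (size + SLAB - 1) & ~(size_t)(SLAB - 1);
	size_t off = (size + sizeof (unsigned long) - 1) &
	    ~(sizeof (unsigned long) - 1);
	unsigned char *p = malloc(asize);
	unsigned long canary = CANARY;

	for (; off + sizeof (canary) <= asize; off += sizeof (canary))
		memcpy(p + off, &canary, sizeof (canary));	/* fill slack */
	return (p);
}

static void
canary_free(void *buf, size_t size)
{
	size_t asize = (size + SLAB - 1) & ~(size_t)(SLAB - 1);
	size_t off = (size + sizeof (unsigned long) - 1) &
	    ~(sizeof (unsigned long) - 1);
	unsigned char *p = buf;
	unsigned long v;

	for (; off + sizeof (v) <= asize; off += sizeof (v)) {
		memcpy(&v, p + off, sizeof (v));
		if (v != CANARY)	/* slack was scribbled on */
			fprintf(stderr, "buffer overflow at +%zu\n", off);
	}
	free(buf);
}

int
main(void)
{
	char *buf = canary_alloc(40);

	buf[41] = 'X';		/* one byte past the request ... */
	canary_free(buf, 40);	/* ... tripped by the canary scan */
	return (0);
}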
static void
zio_abd_free(void *abd, size_t size)
{
(void) size;
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
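/*
 * Illustrative standalone sketch (editorial, not part of zio.c): the
 * push/pop transform-stack pattern above. Each push records how to
 * undo itself; popping walks the stack LIFO and re-applies the saved
 * callbacks, just as zio_pop_transforms() invokes zt_transform.
 * Everything here is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void (*undo_fn_t)(int orig_value);

typedef struct xform {
	int xt_orig;		/* saved original state */
	undo_fn_t xt_undo;
	struct xform *xt_next;
} xform_t;

static xform_t *stack;

static void
push_transform(int orig, undo_fn_t undo)
{
	xform_t *xt = malloc(sizeof (*xt));

	xt->xt_orig = orig;
	xt->xt_undo = undo;
	xt->xt_next = stack;	/* newest on top, like io_transform_stack */
	stack = xt;
}

static void
pop_transforms(void)
{
	xform_t *xt;

	while ((xt = stack) != NULL) {
		if (xt->xt_undo != NULL)
			xt->xt_undo(xt->xt_orig);
		stack = xt->xt_next;
		free(xt);
	}
}

static void
restore(int orig)
{
	printf("restoring value %d\n", orig);
}

int
main(void)
{
	push_transform(1, restore);	/* e.g. subblock */
	push_transform(2, restore);	/* e.g. decompression */
	pop_transforms();		/* prints 2 then 1: LIFO order */
	return (0);
}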
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size,
&zio->io_prop.zp_complevel);
abd_return_buf_copy(data, tmp, size);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but when this was done,
* we had run out of bits in what is now zio_flag_t. Future cleanup
* could make this a flag bit.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
void
zio_add_child(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
zl->zl_parent = pio;
zl->zl_child = cio;
ASSERT(list_is_empty(&cio->io_parent_list));
list_insert_head(&cio->io_parent_list, zl);
mutex_enter(&pio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
countp[w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. We only do this if the parent's zio type matches the
* child's type. Otherwise dispatch the parent zio in its
* own taskq.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskq's, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zio's: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zio's from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL &&
pio->io_type == zio->io_type) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
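/*
 * Illustrative standalone sketch (editorial, not part of zio.c): the
 * "hand the parent back to the caller" idea described above, as a
 * trampoline loop. Instead of a completion recursing into its parent
 * (growing the stack for deep chains), each step may return the next
 * node to run, and one loop in the caller drives the whole chain at
 * constant stack depth. All names here are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct node {
	int n_id;
	struct node *n_parent;
} node_t;

/* Complete one node; return its parent as the next thing to execute. */
static node_t *
execute_one(node_t *n)
{
	printf("done: node %d\n", n->n_id);
	return (n->n_parent);	/* NULL terminates the chain */
}

static void
execute(node_t *n)
{
	while (n != NULL)	/* constant depth, like zio_execute() */
		n = execute_one(n);
}

int
main(void)
{
	node_t root = { 1, NULL };
	node_t logical = { 2, &root };
	node_t physical = { 3, &logical };

	execute(&physical);	/* completes physical, logical, then root */
	return (0);
}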
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
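/*
 * Illustrative standalone sketch (editorial, not part of zio.c): using
 * a multi-key comparator shaped like zio_bookmark_compare() with
 * qsort(3). Keys are compared in order of significance, and a final
 * pointer comparison breaks ties so the order is total. The struct
 * here is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct bm {
	unsigned long objset;
	unsigned long object;
} bm_t;

static int
bm_compare(const void *x1, const void *x2)
{
	const bm_t *a = *(const bm_t *const *)x1;
	const bm_t *b = *(const bm_t *const *)x2;

	if (a->objset != b->objset)
		return (a->objset < b->objset ? -1 : 1);
	if (a->object != b->object)
		return (a->object < b->object ? -1 : 1);
	/* tie-break on address for a total order, as above */
	return (a < b ? -1 : (a > b ? 1 : 0));
}

int
main(void)
{
	bm_t x = { 2, 1 }, y = { 1, 9 }, z = { 1, 3 };
	const bm_t *v[] = { &x, &y, &z };

	qsort(v, 3, sizeof (v[0]), bm_compare);
	for (int i = 0; i < 3; i++)
		printf("(%lu, %lu)\n", v[i]->objset, v[i]->object);
	return (0);
}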
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
memset(zio, 0, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT) {
zio->io_bp_copy = *bp;
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
} else {
zio->io_bp = (blkptr_t *)bp;
}
zio->io_bp_orig = *bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child_first(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, zio_flag_t flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("bad blkptr at %px: "
"DVA[0]=%#llx/%#llx "
"DVA[1]=%#llx/%#llx "
"DVA[2]=%#llx/%#llx "
"prop=%#llx "
"pad=%#llx,%#llx "
"phys_birth=%#llx "
"birth=%#llx "
"fill=%#llx "
"cksum=%#llx/%#llx/%#llx/%#llx",
bp,
(long long)bp->blk_dva[0].dva_word[0],
(long long)bp->blk_dva[0].dva_word[1],
(long long)bp->blk_dva[1].dva_word[0],
(long long)bp->blk_dva[1].dva_word[1],
(long long)bp->blk_dva[2].dva_word[0],
(long long)bp->blk_dva[2].dva_word[1],
(long long)bp->blk_prop,
(long long)bp->blk_pad[0],
(long long)bp->blk_pad[1],
(long long)bp->blk_phys_birth,
(long long)bp->blk_birth,
(long long)bp->blk_fill,
(long long)bp->blk_cksum.zc_word[0],
(long long)bp->blk_cksum.zc_word[1],
(long long)bp->blk_cksum.zc_word[2],
(long long)bp->blk_cksum.zc_word[3]);
switch (blk_verify) {
case BLK_VERIFY_HALT:
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out, B_TRUE is returned. The blk_verify
* argument controls the behavior when an invalid field is detected.
*
* Values for blk_verify_flag:
* BLK_VERIFY_ONLY: evaluate the block
* BLK_VERIFY_LOG: evaluate the block and log problems
* BLK_VERIFY_HALT: call zfs_panic_recover on error
*
* Values for blk_config_flag:
* BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
* BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
* obtained for reader
* BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
* performance
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
int errors = 0;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (!spa->spa_trust_config)
return (errors == 0);
switch (blk_config) {
case BLK_CONFIG_HELD:
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
break;
case BLK_CONFIG_NEEDED:
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
break;
case BLK_CONFIG_SKIP:
return (errors == 0);
default:
panic("invalid blk_config %u", blk_config);
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (blk_config == BLK_CONFIG_NEEDED)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
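/*
 * Lightweight check that a single DVA references an existing, non-hole,
 * non-missing vdev and that the allocated range fits within the vdev's
 * allocatable space.
 */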
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
(void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
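/*
 * Create a logical read zio for the given block pointer. The zio is
 * created at the block's physical birth txg and uses the DDT child read
 * pipeline when ZIO_FLAG_DDT_CHILD is set.
 */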
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well, so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
boolean_t brtwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
ASSERT(!brtwrite || !nopwrite);
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync(), keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_brtwrite = brtwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
brt_maybe_exists(spa, bp)) {
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_flag_t flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
brt_maybe_exists(spa, bp)) {
/*
* GANG, DEDUP and BRT blocks can induce a read (for the gang
* block header, the DDT or the BRT), so issue them
* asynchronously so that this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child I/O's done callback.
* The only exceptions are I/Os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
return (zio);
}
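/*
 * Create a parentless I/O issued directly to a leaf vdev on behalf of
 * another zio, e.g. when vdev-level aggregation delegates work.
 */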
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
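/*
 * Asynchronously flush the write cache of the given vdev (and all of its
 * children) by issuing a DKIOCFLUSHWRITECACHE ioctl.
 */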
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* Round the provided allocation size up to a value that can be allocated
* by at least some vdev(s) in the pool with minimal or no additional
* padding, and without extra space usage on others.
*/
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
if (size > spa->spa_min_alloc)
return (roundup(size, spa->spa_gcd_alloc));
return (spa->spa_min_alloc);
}
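/*
 * Worked example (hypothetical pool geometry): with spa_min_alloc = 4096
 * and spa_gcd_alloc = 4096, a 6000-byte request rounds up to 8192, while
 * a 3000-byte request is raised to spa_min_alloc (4096).
 */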
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
}
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zp->zp_brtwrite)
return (zio);
ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set, then set the
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp; treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
uint32_t pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
MIN(zp->zp_copies, spa_max_replication(spa))
== BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = NULL;
psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
zp->zp_complevel);
if (psize == 0) {
compress = ZIO_COMPRESS_OFF;
} else if (psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
if (cbuf != NULL)
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus compressratio property) are correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
size_t rounded = (size_t)zio_roundup_alloc_size(spa,
psize);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp; treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize, zp->zp_complevel);
if (psize == 0 || psize >= lsize)
compress = ZIO_COMPRESS_OFF;
} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
!(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
/*
* If we are raw receiving an encrypted dataset we should not
* take this codepath because it will change the on-disk block
* and decryption will fail.
*/
size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
lsize);
if (rounded != psize) {
abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, rounded, NULL);
}
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if ((zio->io_priority == ZIO_PRIORITY_NOW ||
zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
&zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO, thus jump to the end of this function and "skip" the
* delay; issuing it directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
zio_interrupt(zio);
} else {
/*
* Use taskq_dispatch_delay() in place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
zio_interrupt, zio, TQ_NOSLEEP,
expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%llx stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, (u_longlong_t)pio->io_flags,
pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, const char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine whether the stack in the current context is large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#else
(void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
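/*
 * Advance to the next stage that is present in this zio's pipeline;
 * stages are one-hot bits, so shifting left walks them in order.
 */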
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
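/*
 * Wait for the zio to complete, waking periodically so the deadman
 * can report (and, depending on failmode, restart) a hung I/O.
 */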
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_root_zio "Godfather" I/O which
* will ensure they complete prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
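/*
 * Illustrative gang tree (leaves hold user data, interior nodes are
 * gang headers; SPA_GBH_NBLKPTRS is 3 here):
 *
 *	        gang leader (header)
 *	       /         |          \
 *	    data       header       data
 *	             /    |    \
 *	          data  data  data
 */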
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
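/*
 * Per-I/O-type callbacks used by zio_gang_tree_issue(), indexed by
 * zio_type_t; NULL entries are types that never operate on gang blocks.
 */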
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(list_is_empty(&zio->io_child_list));
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have the ZIO_FLAG_NODATA bit set, but we can't
* check for it here as it is cleared in zio_ready().
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* If one copy was requested, store 2 copies of the GBH, so that we
* can still traverse all the data (e.g. to free or scrub) even if a
* block is damaged. Note that we can't store 3 copies of the GBH in
* all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
*/
int gbh_copies = copies;
if (gbh_copies == 1) {
gbh_copies = MIN(2, spa_max_replication(spa));
}
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
memset(gbh, 0, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
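/*
 * Each iteration carves roughly an even share of the remaining size
 * (resid divided by the remaining gang slots) into the next member,
 * rounded up to SPA_MINBLOCKSIZE.
 */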
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio_nowait(zio);
return (pio);
}
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp_orig->blk_dva[d]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
/*
* ==========================================================================
* Block Reference Table
* ==========================================================================
*/
static zio_t *
zio_brt_free(zio_t *zio)
{
blkptr_t *bp;
bp = zio->io_bp;
if (BP_GET_LEVEL(bp) > 0 ||
BP_IS_METADATA(bp) ||
!brt_maybe_exists(zio->io_spa, bp)) {
return (zio);
}
if (!brt_entry_decref(zio->io_spa, bp)) {
/*
* This isn't the last reference, so we cannot free
* the data yet.
*/
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
}
return (zio);
}
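/*
* Illustrative example (hypothetical refcounts): for a block cloned so
* that the BRT holds three references, the first two frees only drop a
* reference (brt_entry_decref() returns false) and short-circuit to
* ZIO_INTERLOCK_PIPELINE, while the third free observes the last
* reference being dropped and continues down the pipeline to actually
* free the DVAs.
*/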
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio, so for raw zios
* we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
zio_t *cio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
static ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
zbookmark_phys_t *bm = &zio->io_bookmark;
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
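/*
* Sketch of the allocator selection above (hypothetical bookmark
* values): blkids 0x00000 through 0xfffff of the same object and level
* share one 2^20-block region and thus hash to the same allocator,
* preserving physical adjacency, while blkid 0x100000 falls in the
* next region and may hash elsewhere:
*
* allocator(blkid 0x000abc) == cityhash4(os, obj, lvl, 0) % count
* allocator(blkid 0x100abc) == cityhash4(os, obj, lvl, 1) % count
*/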
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio->io_size,
zio->io_prop.zp_type, zio->io_prop.zp_level,
zio->io_prop.zp_zpl_smallblk);
zio->io_metaslab_class = mc;
}
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync() which is abysmal for performance.
*/
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fall back to the normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
int flags = METASLAB_ZIL;
int allocator = (uint_t)cityhash4(0, 0, 0,
os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
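/*
* The fallback order implemented above, in sketch form (the classes
* are the real ones; *slog reports only a first-try success):
*
* 1) spa_log_class()           dedicated slog devices; *slog = B_TRUE
* 2) spa_embedded_log_class()  embedded log metaslabs; *slog = B_FALSE
* 3) spa_normal_class()        last resort; *slog = B_FALSE
*/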
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different from the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_noalloc) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seemed no clean way to allow such writes while bypassing
* spurious ones. At this point, just avoid all bypassing for dRAID
* for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
vdev_queue_io_done(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
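/*
* Usage sketch (hypothetical caller): the ARC promotes an in-flight
* prefetch read to demand priority so it stops queueing behind other
* low-priority I/O:
*
* zio_change_priority(pio, ZIO_PRIORITY_SYNC_READ);
*
* The change is applied to pio's leaf vdev queue entry if it has one,
* and then recursively to all of pio's children.
*/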
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
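/*
* A note on the ">> 1" idiom used by the three helpers above (and by
* the retry path in zio_vdev_io_assess()): pipeline stages are one-hot
* bits, and the pipeline advances to the next stage bit after
* io_stage, so setting io_stage to the desired stage shifted right by
* one makes that stage the next one executed. For example:
*
* zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
*
* causes the pipeline to re-enter zio_vdev_io_start() next.
*/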
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have two 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
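/*
* Summary of where the function above places encryption parameters
* (a condensed sketch; see the block comment in zio_crypt.c for the
* authoritative layout):
*
* indirect blocks      MAC of the lower-level MACs, encoded in the bp
* objset blocks        two MACs embedded in the objset_phys_t itself
* unencrypted types    authenticated only; MAC encoded in the bp
* ZIL blocks           salt/IV decoded from the bp; MAC stored in the
*                      block's embedded checksum
* other encrypted      salt, IV, and MAC all encoded in the bp
*/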
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
}
}
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
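/*
* For example, per the ranking above (0, ENXIO, ECKSUM, EIO, other):
*
* zio_worst_error(ENXIO, ECKSUM) == ECKSUM
* zio_worst_error(0, EIO)        == EIO
* zio_worst_error(EINVAL, EIO)   == EINVAL   unranked errors rank worst
*/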
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
#ifdef ZFS_DEBUG
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
#endif
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (bp != NULL && BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, taking
* longer than zio_slow_io_ms (30 seconds by default) to complete,
* post an error describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, 0, &zio->io_tqent);
}
return (NULL);
}
ASSERT(list_is_empty(&zio->io_child_list));
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_brt_free,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (a value larger than any level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level, but different blkids, if the block sizes are not the same.
* There is presently no way to change the indirect block sizes
*/
return (0);
}
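/*
* Worked example of the canonicalization above (hypothetical 16K
* dnode blocks, i.e. dbss = 32 sectors, so 32 dnodes per L0 block):
* the meta-dnode bookmark <objset, DMU_META_DNODE_OBJECT, level 0,
* blkid 2> holds dnodes 64-95 and canonicalizes to
*
* obj = 2 * 32 = 64, L0 = 0, level = 0 + COMPARE_META_LEVEL
*
* so it sorts before every bookmark inside object 64, matching the
* order of a pre-order traversal.
*/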
/*
* This function checks the following: given that last_block is the place that
* our traversal stopped last time, does that guarantee that we've visited
* every node under subtree_root? To answer this, we can't just use the raw
* output of zbookmark_compare. We have to pass in a modified version of
* subtree_root: by incrementing the block id, and then checking whether the
* modified bookmark compares less than or equal to last_block, we can tell
* whether or not having visited last_block implies that all of subtree_root's
* children have been visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT0(last_block->zb_level);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare code works, be sure
* to make sure that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
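/*
* Usage sketch (hypothetical bookmarks): a resuming scan can decide
* whether an indirect subtree still needs to be descended into:
*
* if (zbookmark_subtree_completed(dnp, &subtree_root, &last_visited))
*         return;    every L0 block under subtree_root was visited
*/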
/*
* This function is similar to zbookmark_subtree_completed(), but returns true
* if subtree_root is equal to or ahead of last_block, i.e. still to be done.
*/
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
ASSERT0(last_block->zb_level);
if (dnp == NULL)
return (B_FALSE);
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
last_block) >= 0);
}
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
diff --git a/sys/contrib/openzfs/module/zfs/zio_checksum.c b/sys/contrib/openzfs/module/zfs/zio_checksum.c
index 9de515e8767a..e511b31fee6d 100644
--- a/sys/contrib/openzfs/module/zfs/zio_checksum.c
+++ b/sys/contrib/openzfs/module/zfs/zio_checksum.c
@@ -1,573 +1,577 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zil.h>
#include <sys/abd.h>
#include <zfs_fletcher.h>
/*
* Checksum vectors.
*
* In the SPA, everything is checksummed. We support checksum vectors
* for three distinct reasons:
*
* 1. Different kinds of data need different levels of protection.
* For SPA metadata, we always want a very strong checksum.
* For user data, we let users make the trade-off between speed
* and checksum strength.
*
* 2. Cryptographic hash and MAC algorithms are an area of active research.
* It is likely that future hash functions will be at least as strong
* as current best-of-breed, and may be substantially faster as well.
* We want the ability to take advantage of these new hashes as soon as
* they become available.
*
* 3. If someone develops hardware that can compute a strong hash quickly,
* we want the ability to take advantage of that hardware.
*
* Of course, we don't want a checksum upgrade to invalidate existing
* data, so we store the checksum *function* in eight bits of the bp.
* This gives us room for up to 256 different checksum functions.
*
* When writing a block, we always checksum it with the latest-and-greatest
* checksum function of the appropriate strength. When reading a block,
* we compare the expected checksum against the actual checksum, which we
* compute via the checksum function specified by BP_GET_CHECKSUM(bp).
*
* SALTED CHECKSUMS
*
* To enable the use of less secure hash algorithms with dedup, we
* introduce the notion of salted checksums (MACs, really). A salted
* checksum is fed both a random 256-bit value (the salt) and the data
* to be checksummed. This salt is kept secret (stored on the pool, but
* never shown to the user). Thus even if an attacker knows of collision
* weaknesses in the hash algorithm, they won't be able to mount a known
* plaintext attack on the DDT, since the actual hash value cannot be
* known ahead of time. How the salt is used is algorithm-specific
* (some might simply prefix it to the data block, others might need to
* utilize a full-blown HMAC). On disk the salt is stored in a ZAP
* object in the MOS (DMU_POOL_CHECKSUM_SALT).
*
* CONTEXT TEMPLATES
*
* Some hashing algorithms need to perform a substantial amount of
* initialization work (e.g. salted checksums above may need to pre-hash
* the salt) before being able to process data. Performing this
* redundant work for each block would be wasteful, so we instead allow
* a checksum algorithm to do the work once (the first time it's used)
* and then keep this pre-initialized context as a template inside the
* spa_t (spa_cksum_tmpls). If the zio_checksum_info_t contains
* non-NULL ci_tmpl_init and ci_tmpl_free callbacks, they are used to
* construct and destruct the pre-initialized checksum context. The
* pre-initialized context is then reused during each checksum
* invocation and passed to the checksum function.
*/
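/*
* Illustrative sketch (hypothetical caller, not upstream code): driving
* a template-based checksum by hand, mirroring what
* zio_checksum_compute() does further below:
*
*	zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_BLAKE3];
*	zio_checksum_template_init(ZIO_CHECKSUM_BLAKE3, spa);
*	ci->ci_func[0](abd, size,
*	    spa->spa_cksum_tmpls[ZIO_CHECKSUM_BLAKE3], &cksum);
*/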
static void
abd_checksum_off(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) abd, (void) size, (void) ctx_template;
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_native, zcp);
}
static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_byteswap, zcp);
}
static inline void
abd_fletcher_4_impl(abd_t *abd, uint64_t size, zio_abd_checksum_data_t *acdp)
{
fletcher_4_abd_ops.acf_init(acdp);
abd_iterate_func(abd, 0, size, fletcher_4_abd_ops.acf_iter, acdp);
fletcher_4_abd_ops.acf_fini(acdp);
}
void
abd_fletcher_4_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_NATIVE,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
void
abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
(void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_BYTESWAP,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
{{NULL, NULL}, NULL, NULL, 0, "inherit"},
{{NULL, NULL}, NULL, NULL, 0, "on"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "off"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"label"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"gang_header"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, 0, "fletcher2"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA, "fletcher4"},
{{abd_checksum_sha256, abd_checksum_sha256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha256"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog2"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "noparity"},
{{abd_checksum_sha512_native, abd_checksum_sha512_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha512"},
{{abd_checksum_skein_native, abd_checksum_skein_byteswap},
abd_checksum_skein_tmpl_init, abd_checksum_skein_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "skein"},
{{abd_checksum_edonr_native, abd_checksum_edonr_byteswap},
abd_checksum_edonr_tmpl_init, abd_checksum_edonr_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_SALTED |
ZCHECKSUM_FLAG_NOPWRITE, "edonr"},
{{abd_checksum_blake3_native, abd_checksum_blake3_byteswap},
abd_checksum_blake3_tmpl_init, abd_checksum_blake3_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "blake3"},
};
/*
* The flag corresponding to the "verify" in dedup=[checksum,]verify
* must be cleared first, so callers should use ZIO_CHECKSUM_MASK.
*/
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
switch (cksum) {
case ZIO_CHECKSUM_BLAKE3:
return (SPA_FEATURE_BLAKE3);
case ZIO_CHECKSUM_SHA512:
return (SPA_FEATURE_SHA512);
case ZIO_CHECKSUM_SKEIN:
return (SPA_FEATURE_SKEIN);
case ZIO_CHECKSUM_EDONR:
return (SPA_FEATURE_EDONR);
default:
return (SPA_FEATURE_NONE);
}
}
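/*
* Illustrative usage sketch: a caller holding a dedup checksum value,
* which may carry the "verify" bit, strips it before the lookup:
*
*	spa_feature_t f =
*	    zio_checksum_to_feature(cksum & ZIO_CHECKSUM_MASK);
*/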
enum zio_checksum
zio_checksum_select(enum zio_checksum child, enum zio_checksum parent)
{
ASSERT(child < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (ZIO_CHECKSUM_ON_VALUE);
return (child);
}
enum zio_checksum
zio_checksum_dedup_select(spa_t *spa, enum zio_checksum child,
enum zio_checksum parent)
{
ASSERT((child & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT((parent & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (spa_dedup_checksum(spa));
if (child == (ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY))
return (spa_dedup_checksum(spa) | ZIO_CHECKSUM_VERIFY);
ASSERT((zio_checksum_table[child & ZIO_CHECKSUM_MASK].ci_flags &
ZCHECKSUM_FLAG_DEDUP) ||
(child & ZIO_CHECKSUM_VERIFY) || child == ZIO_CHECKSUM_OFF);
return (child);
}
/*
* Set the external verifier for a gang block based on <vdev, offset, txg>,
* a tuple which is guaranteed to be unique for the life of the pool.
*/
static void
zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t txg = BP_PHYSICAL_BIRTH(bp);
ASSERT(BP_IS_GANG(bp));
ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
}
/*
* Set the external verifier for a label block based on its offset.
* The vdev is implicit, and the txg is unknowable at pool open time --
* hence the logic in vdev_uberblock_load() to find the most recent copy.
*/
static void
zio_checksum_label_verifier(zio_cksum_t *zcp, uint64_t offset)
{
ZIO_SET_CHECKSUM(zcp, offset, 0, 0, 0);
}
/*
* Calls the template init function of a checksum which supports context
* templates and installs the template into the spa_t.
*/
static void
zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
if (ci->ci_tmpl_init == NULL)
return;
if (spa->spa_cksum_tmpls[checksum] != NULL)
return;
VERIFY(ci->ci_tmpl_free != NULL);
mutex_enter(&spa->spa_cksum_tmpls_lock);
if (spa->spa_cksum_tmpls[checksum] == NULL) {
spa->spa_cksum_tmpls[checksum] =
ci->ci_tmpl_init(&spa->spa_cksum_salt);
VERIFY(spa->spa_cksum_tmpls[checksum] != NULL);
}
mutex_exit(&spa->spa_cksum_tmpls_lock);
}
/* convenience function to update a checksum to accommodate an encryption MAC */
static void
zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
{
/*
* Weak checksums do not have their entropy spread evenly
* across the bits of the checksum. Therefore, when truncating
* a weak checksum we XOR the first 2 words with the last 2 so
* that we don't "lose" any entropy unnecessarily.
*/
if (xor) {
cksum->zc_word[0] ^= cksum->zc_word[2];
cksum->zc_word[1] ^= cksum->zc_word[3];
}
cksum->zc_word[2] = saved->zc_word[2];
cksum->zc_word[3] = saved->zc_word[3];
}
/*
* Generate the checksum.
*/
void
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
abd_t *abd, uint64_t size)
{
static const uint64_t zec_magic = ZEC_MAGIC;
blkptr_t *bp = zio->io_bp;
uint64_t offset = zio->io_offset;
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t cksum, saved;
spa_t *spa = zio->io_spa;
boolean_t insecure = (ci->ci_flags & ZCHECKSUM_FLAG_DEDUP) == 0;
ASSERT((uint_t)checksum < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(ci->ci_func[0] != NULL);
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_eck_t eck;
size_t eck_offset;
memset(&saved, 0, sizeof (zio_cksum_t));
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
- size = P2ROUNDUP_TYPED(zilc.zc_nused, ZIL_MIN_BLKSZ,
- uint64_t);
+ uint64_t nused = P2ROUNDUP_TYPED(zilc.zc_nused,
+ ZIL_MIN_BLKSZ, uint64_t);
+ ASSERT3U(size, >=, nused);
+ size = nused;
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck);
} else {
+ ASSERT3U(size, >=, sizeof (zio_eck_t));
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER) {
zio_checksum_gang_verifier(&eck.zec_cksum, bp);
} else if (checksum == ZIO_CHECKSUM_LABEL) {
zio_checksum_label_verifier(&eck.zec_cksum, offset);
} else {
saved = eck.zec_cksum;
eck.zec_cksum = bp->blk_cksum;
}
abd_copy_from_buf_off(abd, &zec_magic,
eck_offset + offsetof(zio_eck_t, zec_magic),
sizeof (zec_magic));
abd_copy_from_buf_off(abd, &eck.zec_cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
abd_copy_from_buf_off(abd, &cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
} else {
saved = bp->blk_cksum;
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (BP_USES_CRYPT(bp) && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
bp->blk_cksum = cksum;
}
}
int
zio_checksum_error_impl(spa_t *spa, const blkptr_t *bp,
enum zio_checksum checksum, abd_t *abd, uint64_t size, uint64_t offset,
zio_bad_cksum_t *info)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t actual_cksum, expected_cksum;
zio_eck_t eck;
int byteswap;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (SET_ERROR(EINVAL));
zio_checksum_template_init(checksum, spa);
IMPLY(bp == NULL, ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED);
IMPLY(bp == NULL, checksum == ZIO_CHECKSUM_LABEL);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_cksum_t verifier;
size_t eck_offset;
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
uint64_t nused;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck) +
offsetof(zio_eck_t, zec_cksum);
if (eck.zec_magic == ZEC_MAGIC) {
nused = zilc.zc_nused;
} else if (eck.zec_magic == BSWAP_64(ZEC_MAGIC)) {
nused = BSWAP_64(zilc.zc_nused);
} else {
return (SET_ERROR(ECKSUM));
}
- if (nused > size) {
+ nused = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
+ if (size < nused)
return (SET_ERROR(ECKSUM));
- }
-
- size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
+ size = nused;
} else {
+ if (size < sizeof (zio_eck_t))
+ return (SET_ERROR(ECKSUM));
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
eck_offset += offsetof(zio_eck_t, zec_cksum);
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER)
zio_checksum_gang_verifier(&verifier, bp);
else if (checksum == ZIO_CHECKSUM_LABEL)
zio_checksum_label_verifier(&verifier, offset);
else
verifier = bp->blk_cksum;
byteswap = (eck.zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck.zec_cksum;
abd_copy_from_buf_off(abd, &verifier, eck_offset,
sizeof (zio_cksum_t));
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
abd_copy_from_buf_off(abd, &expected_cksum, eck_offset,
sizeof (zio_cksum_t));
if (byteswap) {
byteswap_uint64_array(&expected_cksum,
sizeof (zio_cksum_t));
}
} else {
byteswap = BP_SHOULD_BYTESWAP(bp);
expected_cksum = bp->blk_cksum;
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
}
/*
* MAC checksums are a special case since half of this checksum will
* actually be the encryption MAC. This will be verified by the
* decryption process, so we just check the truncated checksum now.
* Objset blocks use embedded MACs so we don't truncate the checksum
* for them.
*/
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET) {
if (!(ci->ci_flags & ZCHECKSUM_FLAG_DEDUP)) {
actual_cksum.zc_word[0] ^= actual_cksum.zc_word[2];
actual_cksum.zc_word[1] ^= actual_cksum.zc_word[3];
}
actual_cksum.zc_word[2] = 0;
actual_cksum.zc_word[3] = 0;
expected_cksum.zc_word[2] = 0;
expected_cksum.zc_word[3] = 0;
}
if (info != NULL) {
info->zbc_checksum_name = ci->ci_name;
info->zbc_byteswapped = byteswap;
info->zbc_injected = 0;
info->zbc_has_cksum = 1;
}
if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (SET_ERROR(ECKSUM));
return (0);
}
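/*
* Illustrative sketch (hypothetical caller, not part of this change):
* verifying a label, the embedded-checksum case with no bp, where the
* verifier is derived from the device offset alone:
*
*	err = zio_checksum_error_impl(spa, NULL, ZIO_CHECKSUM_LABEL,
*	    abd, size, offset, &info);
*/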
int
zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
{
blkptr_t *bp = zio->io_bp;
uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
int error;
uint64_t size = (bp == NULL ? zio->io_size :
(BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
uint64_t offset = zio->io_offset;
abd_t *data = zio->io_abd;
spa_t *spa = zio->io_spa;
error = zio_checksum_error_impl(spa, bp, checksum, data, size,
offset, info);
if (zio_injection_enabled && error == 0 && zio->io_error == 0) {
error = zio_handle_fault_injection(zio, ECKSUM);
if (error != 0)
info->zbc_injected = 1;
}
return (error);
}
/*
* Called by a spa_t that's about to be deallocated. This steps through
* all of the checksum context templates and deallocates any that were
* initialized using the algorithm-specific template init function.
*/
void
zio_checksum_templates_free(spa_t *spa)
{
for (enum zio_checksum checksum = 0;
checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
if (spa->spa_cksum_tmpls[checksum] != NULL) {
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
VERIFY(ci->ci_tmpl_free != NULL);
ci->ci_tmpl_free(spa->spa_cksum_tmpls[checksum]);
spa->spa_cksum_tmpls[checksum] = NULL;
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 53dcb4dee448..20ea71f23376 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -1,1791 +1,1741 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot and module load. No user command
* needs to be run before opening and using a device.
*
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
/*
* Note on locking of zvol state structures.
*
* These structures are used to maintain internal state used to emulate block
* devices on top of zvols. In particular, management of device minor number
* operations - create, remove, rename, and set_snapdev - involves access to
* these structures. The zvol_state_lock is primarily used to protect the
* zvol_state_list. The zv->zv_state_lock is used to protect the contents
* of the zvol_state_t structures, as well as to make sure that when the
* time comes to remove the structure from the list, it is not in use, and
* therefore, it can be taken off zvol_state_list and freed.
*
* The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
* e.g. for the duration of receive and rollback operations. This lock can be
* held for significant periods of time. Given that it is undesirable to hold
* mutexes for long periods of time, the following lock ordering applies:
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
*
* The minor operations are issued to spa->spa_zvol_taskq queues, which are
* single-threaded (to preserve the order of minor operations) and are
* executed through zvol_task_cb, which dispatches the specific operations.
* These operations are therefore serialized per pool, so we can be certain
* that for a given zvol there is only one operation in progress at a time.
* In particular, the zvol_state_t for a given zvol is first allocated and
* placed on zvol_state_list, and subsequent minor operations for that zvol
* proceed in the order in which they were issued.
*
*/
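/*
* Illustrative sketch of the lock ordering described above (not part of
* this change):
*
*	rw_enter(&zvol_state_lock, RW_READER);	   (1) protect the list
*	rw_enter(&zv->zv_suspend_lock, RW_READER); (2) suspend zvol I/O
*	mutex_enter(&zv->zv_state_lock);	   (3) protect zv contents
*	...
*	mutex_exit(&zv->zv_state_lock);
*	rw_exit(&zv->zv_suspend_lock);
*	rw_exit(&zvol_state_lock);
*/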
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
struct hlist_head *zvol_htable;
static list_t zvol_state_list;
krwlock_t zvol_state_lock;
typedef enum {
ZVOL_ASYNC_REMOVE_MINORS,
ZVOL_ASYNC_RENAME_MINORS,
ZVOL_ASYNC_SET_SNAPDEV,
ZVOL_ASYNC_SET_VOLMODE,
ZVOL_ASYNC_MAX
} zvol_async_op_t;
typedef struct {
zvol_async_op_t op;
char name1[MAXNAMELEN];
char name2[MAXNAMELEN];
uint64_t value;
} zvol_task_t;
uint64_t
zvol_name_hash(const char *name)
{
int i;
uint64_t crc = -1ULL;
const uint8_t *p = (const uint8_t *)name;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
}
return (crc);
}
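/*
* Illustrative sketch (not part of this change): the hash selects a
* bucket in zvol_htable via ZVOL_HT_HEAD(), as zvol_find_by_name_hash()
* does below:
*
*	uint64_t hash = zvol_name_hash(name);
*	hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
*		zv = hlist_entry(p, zvol_state_t, zv_hlink);
*		...
*	}
*/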
/*
* Find a zvol_state_t given the name and hash generated by zvol_name_hash.
* If found, return with zv_suspend_lock and zv_state_lock taken; otherwise
* return NULL without taking any locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (possibly
* RW_NONE) in which zv_suspend_lock is to be taken.
*/
zvol_state_t *
zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
{
zvol_state_t *zv;
struct hlist_node *p = NULL;
rw_enter(&zvol_state_lock, RW_READER);
hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
zv = hlist_entry(p, zvol_state_t, zv_hlink);
mutex_enter(&zv->zv_state_lock);
if (zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
/*
* this is the right zvol, take the locks in the
* right order
*/
if (mode != RW_NONE &&
!rw_tryenter(&zv->zv_suspend_lock, mode)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, mode);
mutex_enter(&zv->zv_state_lock);
/*
* zvol cannot be renamed as we continue
* to hold zvol_state_lock
*/
ASSERT(zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN)
== 0);
}
rw_exit(&zvol_state_lock);
return (zv);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (NULL);
}
/*
* Find a zvol_state_t given the name.
* If found, return with zv_suspend_lock and zv_state_lock taken; otherwise
* return NULL without taking any locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (possibly
* RW_NONE) in which zv_suspend_lock is to be taken.
*/
static zvol_state_t *
zvol_find_by_name(const char *name, int mode)
{
return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
}
/*
* ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
nvlist_t *nvprops = zct->zct_props;
int error;
uint64_t volblocksize, volsize;
VERIFY(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
/*
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
VERIFY(nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
ASSERT(error == 0);
}
/*
* ZFS_IOC_OBJSET_STATS entry point.
*/
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
int error;
dmu_object_info_t *doi;
uint64_t val;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
return (SET_ERROR(error));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
doi->doi_data_block_size);
}
kmem_free(doi, sizeof (dmu_object_info_t));
return (SET_ERROR(error));
}
/*
* Sanity check volume size.
*/
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > SPEC_MAXOFFSET_T)
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
/*
* Ensure the ZAP is flushed, then inform the VFS of the capacity change.
*/
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
dmu_tx_t *tx;
int error;
uint64_t txg;
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (SET_ERROR(error));
}
txg = dmu_tx_get_txg(tx);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
&volsize, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
if (error == 0)
error = dmu_free_long_range(os,
ZVOL_OBJ, volsize, DMU_OBJECT_END);
return (error);
}
/*
* ZFS_PROP_VOLSIZE set entry point. Note that modifying the volume
* size will result in a udev "change" event being generated.
*/
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
objset_t *os = NULL;
uint64_t readonly;
int error;
boolean_t owned = B_FALSE;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
return (SET_ERROR(error));
if (readonly)
return (SET_ERROR(EROFS));
zvol_state_t *zv = zvol_find_by_name(name, RW_READER);
ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
RW_READ_HELD(&zv->zv_suspend_lock)));
if (zv == NULL || zv->zv_objset == NULL) {
if (zv != NULL)
rw_exit(&zv->zv_suspend_lock);
if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(error));
}
owned = B_TRUE;
if (zv != NULL)
zv->zv_objset = os;
} else {
os = zv->zv_objset;
}
dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);
if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
(error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
goto out;
error = zvol_update_volsize(volsize, os);
if (error == 0 && zv != NULL) {
zv->zv_volsize = volsize;
zv->zv_changed = 1;
}
out:
kmem_free(doi, sizeof (dmu_object_info_t));
if (owned) {
dmu_objset_disown(os, B_TRUE, FTAG);
if (zv != NULL)
zv->zv_objset = NULL;
} else {
rw_exit(&zv->zv_suspend_lock);
}
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
if (error == 0 && zv != NULL)
zvol_os_update_volsize(zv, volsize);
return (SET_ERROR(error));
}
/*
* Sanity check volume block size.
*/
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
/* Record sizes above 128k need the feature to be enabled */
if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
int error;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (volblocksize > zfs_max_recordsize)
return (SET_ERROR(EDOM));
spa_close(spa, FTAG);
}
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (SET_ERROR(EDOM));
return (0);
}
/*
* Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
* implement DKIOCFREE/free-long-range.
*/
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_truncate_t *lr = arg2;
uint64_t offset, length;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
length);
}
return (error);
}
/*
* Replay a TX_WRITE ZIL transaction that didn't get committed
* after a system failure
*/
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_write_t *lr = arg2;
objset_t *os = zv->zv_objset;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
uint64_t offset, length;
dmu_tx_t *tx;
int error;
+ ASSERT3U(lr->lr_common.lrc_reclen, >=, sizeof (*lr));
+
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
}
return (error);
}
-/*
- * Replay a TX_CLONE_RANGE ZIL transaction that didn't get committed
- * after a system failure.
- *
- * TODO: For now we drop block cloning transations for ZVOLs as they are
- * unsupported, but we still need to inform BRT about that as we
- * claimed them during pool import.
- * This situation can occur when we try to import a pool from a ZFS
- * version supporting block cloning for ZVOLs into a system that
- * has this ZFS version, that doesn't support block cloning for ZVOLs.
- */
-static int
-zvol_replay_clone_range(void *arg1, void *arg2, boolean_t byteswap)
-{
- char name[ZFS_MAX_DATASET_NAME_LEN];
- zvol_state_t *zv = arg1;
- objset_t *os = zv->zv_objset;
- lr_clone_range_t *lr = arg2;
- blkptr_t *bp;
- dmu_tx_t *tx;
- spa_t *spa;
- uint_t ii;
- int error;
-
- dmu_objset_name(os, name);
- cmn_err(CE_WARN, "ZFS dropping block cloning transaction for %s.",
- name);
-
- if (byteswap)
- byteswap_uint64_array(lr, sizeof (*lr));
-
- tx = dmu_tx_create(os);
- error = dmu_tx_assign(tx, TXG_WAIT);
- if (error) {
- dmu_tx_abort(tx);
- return (error);
- }
-
- spa = os->os_spa;
-
- for (ii = 0; ii < lr->lr_nbps; ii++) {
- bp = &lr->lr_bps[ii];
-
- if (!BP_IS_HOLE(bp)) {
- zio_free(spa, dmu_tx_get_txg(tx), bp);
- }
- }
-
- (void) zil_replaying(zv->zv_zilog, tx);
- dmu_tx_commit(tx);
-
- return (0);
-}
-
static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
(void) arg1, (void) arg2, (void) byteswap;
return (SET_ERROR(ENOTSUP));
}
/*
* Callback vectors for replaying records.
* Only TX_WRITE and TX_TRUNCATE are needed for zvol.
*/
zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE] = {
zvol_replay_err, /* no such transaction type */
zvol_replay_err, /* TX_CREATE */
zvol_replay_err, /* TX_MKDIR */
zvol_replay_err, /* TX_MKXATTR */
zvol_replay_err, /* TX_SYMLINK */
zvol_replay_err, /* TX_REMOVE */
zvol_replay_err, /* TX_RMDIR */
zvol_replay_err, /* TX_LINK */
zvol_replay_err, /* TX_RENAME */
zvol_replay_write, /* TX_WRITE */
zvol_replay_truncate, /* TX_TRUNCATE */
zvol_replay_err, /* TX_SETATTR */
zvol_replay_err, /* TX_ACL */
zvol_replay_err, /* TX_CREATE_ATTR */
zvol_replay_err, /* TX_CREATE_ACL_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL */
zvol_replay_err, /* TX_MKDIR_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
zvol_replay_err, /* TX_WRITE2 */
zvol_replay_err, /* TX_SETSAXATTR */
zvol_replay_err, /* TX_RENAME_EXCHANGE */
zvol_replay_err, /* TX_RENAME_WHITEOUT */
- zvol_replay_clone_range /* TX_CLONE_RANGE */
+ zvol_replay_err, /* TX_CLONE_RANGE */
};
/*
* zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
*
* We store data in the log buffers if it's small enough.
* Otherwise we will later flush the data out via dmu_sync().
*/
static const ssize_t zvol_immediate_write_sz = 32768;
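/*
* Worked example (illustrative, assuming no separate log device): with
* volblocksize = 128K, a 128K write exceeds zvol_immediate_write_sz and
* is logged as WR_INDIRECT (the data is later written via dmu_sync() and
* the log records a block pointer); a 4K synchronous write is copied
* into the log record itself (WR_COPIED); the same 4K write without sync
* is deferred as WR_NEED_COPY.
*/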
void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
uint64_t size, int sync)
{
uint32_t blocksize = zv->zv_volblocksize;
zilog_t *zilog = zv->zv_zilog;
itx_wr_state_t write_state;
uint64_t sz = size;
if (zil_replaying(zilog, tx))
return;
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
size >= blocksize && blocksize > zvol_immediate_write_sz)
write_state = WR_INDIRECT;
else if (sync)
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
while (size) {
itx_t *itx;
lr_write_t *lr;
itx_wr_state_t wr_state = write_state;
ssize_t len = size;
if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
wr_state = WR_NEED_COPY;
else if (wr_state == WR_INDIRECT)
len = MIN(blocksize - P2PHASE(offset, blocksize), size);
itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
}
itx->itx_wr_state = wr_state;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = offset;
lr->lr_length = len;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
itx->itx_private = zv;
itx->itx_sync = sync;
(void) zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
}
if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
dsl_pool_wrlog_count(zilog->zl_dmu_pool, sz, tx->tx_txg);
}
}
/*
* Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
*/
void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
boolean_t sync)
{
itx_t *itx;
lr_truncate_t *lr;
zilog_t *zilog = zv->zv_zilog;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
lr = (lr_truncate_t *)&itx->itx_lr;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = off;
lr->lr_length = len;
itx->itx_sync = sync;
zil_itx_assign(zilog, itx, tx);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
(void) error;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
kmem_free(zgd, sizeof (zgd_t));
}
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zvol_state_t *zv = arg;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
ASSERT3U(size, !=, 0);
zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
DMU_READ_NO_PREFETCH);
} else { /* indirect write */
ASSERT3P(zio, !=, NULL);
/*
* We have to lock the whole block to ensure that no one can change
* the data while it is being written out and its checksum is being
* calculated. Unlike zfs_get_data, we need not re-check the
* blocksize after we get the lock, because it cannot be changed.
*/
size = zv->zv_volblocksize;
offset = P2ALIGN_TYPED(offset, size, uint64_t);
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_buf_hold_noread_by_dnode(zv->zv_dn, offset, zgd,
&db);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db != NULL);
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zvol_get_done, zgd);
if (error == 0)
return (0);
}
}
zvol_get_done(zgd, error);
return (SET_ERROR(error));
}
/*
* The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
*/
void
zvol_insert(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_insert_head(&zvol_state_list, zv);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
}
/*
* Simply remove the zvol from the list of zvols.
*/
static void
zvol_remove(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_remove(&zvol_state_list, zv);
hlist_del(&zv->zv_hlink);
}
/*
* Set up zv after we have just taken ownership of zv->zv_objset.
*/
static int
zvol_setup_zv(zvol_state_t *zv)
{
uint64_t volsize;
int error;
uint64_t ro;
objset_t *os = zv->zv_objset;
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));
zv->zv_zilog = NULL;
zv->zv_flags &= ~ZVOL_WRITTEN_TO;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
return (SET_ERROR(error));
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
return (SET_ERROR(error));
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
return (SET_ERROR(error));
zvol_os_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
zvol_os_set_disk_ro(zv, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
zvol_os_set_disk_ro(zv, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
return (0);
}
/*
* Shut down everything related to zv_objset except zv_objset itself.
* This is the reverse of zvol_setup_zv.
*/
static void
zvol_shutdown_zv(zvol_state_t *zv)
{
ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
RW_LOCK_HELD(&zv->zv_suspend_lock));
if (zv->zv_flags & ZVOL_WRITTEN_TO) {
ASSERT(zv->zv_zilog != NULL);
zil_close(zv->zv_zilog);
}
zv->zv_zilog = NULL;
dnode_rele(zv->zv_dn, zv);
zv->zv_dn = NULL;
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
(void) dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
* return the proper tag for rollback and recv
*/
void *
zvol_tag(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
return (zv->zv_open_count > 0 ? zv : NULL);
}
/*
* Suspend the zvol for recv and rollback.
*/
zvol_state_t *
zvol_suspend(const char *name)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
return (NULL);
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
zvol_shutdown_zv(zv);
/*
* do not hold zv_state_lock across suspend/resume to
* avoid locking up zvol lookups
*/
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
return (zv);
}
int
zvol_resume(zvol_state_t *zv)
{
int error = 0;
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_open_count > 0) {
VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
dmu_objset_rele(zv->zv_objset, zv);
error = zvol_setup_zv(zv);
}
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
/*
* We need this because we don't hold zvol_state_lock while releasing
* zv_suspend_lock. zvol_remove_minors_impl thus cannot check
* zv_suspend_lock to determine whether it is safe to free the zvol,
* because the rwlock is not inherently atomic.
*/
atomic_dec(&zv->zv_suspend_ref);
return (SET_ERROR(error));
}
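/*
* Illustrative usage sketch (hypothetical caller): zvol_suspend() and
* zvol_resume() bracket an operation such as receive or rollback:
*
*	zvol_state_t *zv = zvol_suspend(name);
*	... perform the recv/rollback ...
*	if (zv != NULL)
*		error = zvol_resume(zv);
*/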
int
zvol_first_open(zvol_state_t *zv, boolean_t readonly)
{
objset_t *os;
int error;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(mutex_owned(&spa_namespace_lock));
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
return (SET_ERROR(error));
zv->zv_objset = os;
error = zvol_setup_zv(zv);
if (error) {
dmu_objset_disown(os, 1, zv);
zv->zv_objset = NULL;
}
return (error);
}
void
zvol_last_close(zvol_state_t *zv)
{
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zvol_shutdown_zv(zv);
dmu_objset_disown(zv->zv_objset, 1, zv);
zv->zv_objset = NULL;
}
typedef struct minors_job {
list_t *list;
list_node_t link;
/* input */
char *name;
/* output */
int error;
} minors_job_t;
/*
* Prefetch zvol dnodes for the minors_job
*/
static void
zvol_prefetch_minors_impl(void *arg)
{
minors_job_t *job = arg;
char *dsname = job->name;
objset_t *os = NULL;
job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
FTAG, &os);
if (job->error == 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
dmu_objset_disown(os, B_TRUE, FTAG);
}
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
minors_job_t *j = arg;
list_t *minors_list = j->list;
const char *name = j->name;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
/* skip the designated dataset */
if (name && strcmp(dsname, name) == 0)
return (0);
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
"%s is not a snapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
}
return (0);
}
/*
* If spa_keystore_load_wkey() is called for an encrypted zvol,
* we need to look for any clones also using the key. This function
* is "best effort" - so we just skip over it if there are failures.
*/
static void
zvol_add_clones(const char *dsname, list_t *minors_list)
{
/* Also check if it has clones */
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
if (dsl_pool_hold(dsname, FTAG, &dp) != 0)
return;
if (!spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_ENCRYPTION))
goto out;
if (dsl_dir_hold(dp, dsname, FTAG, &dd, NULL) != 0)
goto out;
if (dsl_dir_phys(dd)->dd_clones == 0)
goto out;
zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
objset_t *mos = dd->dd_pool->dp_meta_objset;
for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
minors_job_t *job;
if (dsl_dataset_hold_obj(dd->dd_pool,
za->za_first_integer, FTAG, &clone) == 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(clone, name);
char *n = kmem_strdup(name);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
dsl_dataset_rele(clone, FTAG);
}
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
out:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
uint64_t snapdev;
int error;
list_t *minors_list = arg;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
if (error)
return (0);
/*
* Given the name and the 'snapdev' property, create device minor nodes
* with the linkages to zvols/snapshots as needed.
* If the name represents a zvol, create a minor node for the zvol, then
* check if its snapshots are 'visible', and if so, iterate over the
* snapshots and create device minor nodes for those.
*/
if (strchr(dsname, '@') == 0) {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
zvol_add_clones(dsname, minors_list);
if (snapdev == ZFS_SNAPDEV_VISIBLE) {
/*
* traverse snapshots only, do not traverse children,
* and skip the 'dsname'
*/
(void) dmu_objset_find(dsname,
zvol_create_snap_minor_cb, (void *)job,
DS_FIND_SNAPSHOTS);
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
dsname);
}
return (0);
}
/*
* Create minors for the specified dataset, including children and snapshots.
* Pay attention to the 'snapdev' property and iterate over the snapshots
* only if they are 'visible'. This approach allows one to assure that the
* snapshot metadata is read from disk only if it is needed.
*
* The name can represent a dataset to be recursively scanned for zvols and
* their snapshots, or a single zvol snapshot. If the name represents a
* dataset, the scan is performed in two nested stages:
* - scan the dataset for zvols, and
* - for each zvol, create a minor node, then check if the zvol's snapshots
* are 'visible', and only then iterate over the snapshots if needed
*
* If the name represents a snapshot, a check is performed if the snapshot is
* 'visible' (which also verifies that the parent is a zvol), and if so,
* a minor node for that snapshot is created.
*/
void
zvol_create_minors_recursive(const char *name)
{
list_t minors_list;
minors_job_t *job;
if (zvol_inhibit_dev)
return;
/*
* This is the list for prefetch jobs. Whenever we find a match during
* dmu_objset_find, we insert a minors_job into the list and use
* taskq_dispatch to prefetch zvol dnodes in parallel. Note that we don't
* need any locking because all list operations are performed on the
* current thread.
*
* We will use this list to call zvol_os_create_minor after the prefetch,
* so we don't have to traverse using dmu_objset_find again.
*/
list_create(&minors_list, sizeof (minors_job_t),
offsetof(minors_job_t, link));
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name, "snapdev",
&snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
fstrans_cookie_t cookie = spl_fstrans_mark();
(void) dmu_objset_find(name, zvol_create_minors_cb,
&minors_list, DS_FIND_CHILDREN);
spl_fstrans_unmark(cookie);
}
taskq_wait_outstanding(system_taskq, 0);
/*
* Prefetch is completed, we can do zvol_os_create_minor
* sequentially.
*/
while ((job = list_remove_head(&minors_list)) != NULL) {
if (!job->error)
(void) zvol_os_create_minor(job->name);
kmem_strfree(job->name);
kmem_free(job, sizeof (minors_job_t));
}
list_destroy(&minors_list);
}
void
zvol_create_minor(const char *name)
{
/*
* Note: the dsl_pool_config_lock must not be held.
* Minor node creation needs to obtain the zvol_state_lock.
* zvol_open() obtains the zvol_state_lock and then the dsl pool
* config lock. Therefore, we can't have the config lock now if
* we are going to wait for the zvol_state_lock, because it
* would be a lock order inversion which could lead to deadlock.
*/
if (zvol_inhibit_dev)
return;
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name,
"snapdev", &snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
(void) zvol_os_create_minor(name);
}
}
/*
* Remove minors for specified dataset including children and snapshots.
*/
static void
zvol_free_task(void *arg)
{
zvol_os_free(arg);
}
void
zvol_remove_minors_impl(const char *name)
{
zvol_state_t *zv, *zv_next;
int namelen = ((name) ? strlen(name) : 0);
taskqid_t t;
list_t free_list;
if (zvol_inhibit_dev)
return;
list_create(&free_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
(strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zvol_os_clear_private(zv);
/* Drop zv_state_lock before zvol_free() */
mutex_exit(&zv->zv_state_lock);
/* Try parallel zv_free, if failed do it in place */
t = taskq_dispatch(system_taskq, zvol_free_task, zv,
TQ_SLEEP);
if (t == TASKQID_INVALID)
list_insert_head(&free_list, zv);
} else {
mutex_exit(&zv->zv_state_lock);
}
}
rw_exit(&zvol_state_lock);
/* Drop zvol_state_lock before calling zvol_free() */
while ((zv = list_remove_head(&free_list)) != NULL)
zvol_os_free(zv);
}
/* Remove minor for this specific volume only */
static void
zvol_remove_minor_impl(const char *name)
{
zvol_state_t *zv = NULL, *zv_next;
if (zvol_inhibit_dev)
return;
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, name) == 0) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
zvol_os_clear_private(zv);
mutex_exit(&zv->zv_state_lock);
break;
} else {
mutex_exit(&zv->zv_state_lock);
}
}
/* Drop zvol_state_lock before calling zvol_free() */
rw_exit(&zvol_state_lock);
if (zv != NULL)
zvol_os_free(zv);
}
/*
* Rename minors for specified dataset including children and snapshots.
*/
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
zvol_state_t *zv, *zv_next;
int oldnamelen;
if (zvol_inhibit_dev)
return;
oldnamelen = strlen(oldname);
rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, oldname) == 0) {
zvol_os_rename_minor(zv, newname);
} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
(zv->zv_name[oldnamelen] == '/' ||
zv->zv_name[oldnamelen] == '@')) {
char *name = kmem_asprintf("%s%c%s", newname,
zv->zv_name[oldnamelen],
zv->zv_name + oldnamelen + 1);
zvol_os_rename_minor(zv, name);
kmem_strfree(name);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
}
typedef struct zvol_snapdev_cb_arg {
uint64_t snapdev;
} zvol_snapdev_cb_arg_t;
static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
zvol_snapdev_cb_arg_t *arg = param;
if (strchr(dsname, '@') == NULL)
return (0);
switch (arg->snapdev) {
case ZFS_SNAPDEV_VISIBLE:
(void) zvol_os_create_minor(dsname);
break;
case ZFS_SNAPDEV_HIDDEN:
(void) zvol_remove_minor_impl(dsname);
break;
}
return (0);
}
static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
zvol_snapdev_cb_arg_t arg = {snapdev};
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* The zvol_set_snapdev_sync() sets snapdev appropriately
* in the dataset hierarchy. Here, we only scan snapshots.
*/
dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
spl_fstrans_unmark(cookie);
}
static void
zvol_set_volmode_impl(char *name, uint64_t volmode)
{
fstrans_cookie_t cookie;
uint64_t old_volmode;
zvol_state_t *zv;
if (strchr(name, '@') != NULL)
return;
/*
* It's unfortunate we need to remove minors before we create new ones:
* this is necessary because our backing gendisk (zvol_state->zv_disk)
* could be different when we set, for instance, volmode from "geom"
* to "dev" (or vice versa).
*/
zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
return;
if (zv != NULL) {
old_volmode = zv->zv_volmode;
mutex_exit(&zv->zv_state_lock);
if (old_volmode == volmode)
return;
zvol_wait_close(zv);
}
cookie = spl_fstrans_mark();
switch (volmode) {
case ZFS_VOLMODE_NONE:
(void) zvol_remove_minor_impl(name);
break;
case ZFS_VOLMODE_GEOM:
case ZFS_VOLMODE_DEV:
(void) zvol_remove_minor_impl(name);
(void) zvol_os_create_minor(name);
break;
case ZFS_VOLMODE_DEFAULT:
(void) zvol_remove_minor_impl(name);
if (zvol_volmode == ZFS_VOLMODE_NONE)
break;
else /* if zvol_volmode is invalid, default to "geom" */
(void) zvol_os_create_minor(name);
break;
}
spl_fstrans_unmark(cookie);
}
static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
uint64_t value)
{
zvol_task_t *task;
/* Never allow tasks on hidden names. */
if (name1[0] == '$')
return (NULL);
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->op = op;
task->value = value;
strlcpy(task->name1, name1, MAXNAMELEN);
if (name2 != NULL)
strlcpy(task->name2, name2, MAXNAMELEN);
return (task);
}
static void
zvol_task_free(zvol_task_t *task)
{
kmem_free(task, sizeof (zvol_task_t));
}
/*
* The worker function, executed asynchronously on the taskq thread.
*/
static void
zvol_task_cb(void *arg)
{
zvol_task_t *task = arg;
switch (task->op) {
case ZVOL_ASYNC_REMOVE_MINORS:
zvol_remove_minors_impl(task->name1);
break;
case ZVOL_ASYNC_RENAME_MINORS:
zvol_rename_minors_impl(task->name1, task->name2);
break;
case ZVOL_ASYNC_SET_SNAPDEV:
zvol_set_snapdev_impl(task->name1, task->value);
break;
case ZVOL_ASYNC_SET_VOLMODE:
zvol_set_volmode_impl(task->name1, task->value);
break;
default:
VERIFY(0);
break;
}
zvol_task_free(task);
}
typedef struct zvol_set_prop_int_arg {
const char *zsda_name;
uint64_t zsda_value;
zprop_source_t zsda_source;
dmu_tx_t *zsda_tx;
} zvol_set_prop_int_arg_t;
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t snapdev;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply snapdev appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "snapdev" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = snapdev;
return (dsl_sync_task(ddname, zvol_set_snapdev_check,
zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
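/*
 * Usage sketch (illustration only; example_show_snapdevs() and "pool/vols"
 * are hypothetical, not part of the source): make every snapshot device
 * node under a hierarchy visible.  The sync task writes the property once
 * at the top-level dataset; each child's effective value is then re-read
 * and applied by the taskq callbacks above.
 */
static int
example_show_snapdevs(void)
{
return (zvol_set_snapdev("pool/vols", ZPROP_SRC_LOCAL,
ZFS_SNAPDEV_VISIBLE));
}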
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
static int
zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t volmode;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply volmode appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "volmode" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = volmode;
return (dsl_sync_task(ddname, zvol_set_volmode_check,
zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
boolean_t
zvol_is_zvol(const char *name)
{
return (zvol_os_is_zvol(name));
}
int
zvol_init_impl(void)
{
int i;
list_create(&zvol_state_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);
zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
KM_SLEEP);
for (i = 0; i < ZVOL_HT_SIZE; i++)
INIT_HLIST_HEAD(&zvol_htable[i]);
return (0);
}
void
zvol_fini_impl(void)
{
zvol_remove_minors_impl(NULL);
/*
* The call to "zvol_remove_minors_impl" may dispatch entries to
* the system_taskq, but it doesn't wait for those entries to
* complete before it returns. Thus, we must wait for all of the
* removals to finish before we can continue.
*/
taskq_wait_outstanding(system_taskq, 0);
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
}
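/*
 * Combined sketch (illustration only; example_remove() and the pool/vol
 * names are hypothetical): synchronous vs. asynchronous minor removal as
 * seen by a caller.  With async == B_FALSE the dispatcher blocks on
 * taskq_wait_id() until zvol_task_cb() has run and freed the task.
 */
static void
example_remove(spa_t *spa)
{
/* Fire and forget: zvol_task_cb() frees the task when it runs. */
zvol_remove_minors(spa, "pool/asyncvol", B_TRUE);
/* Blocking: returns only once all matching minors are gone. */
zvol_remove_minors(spa, "pool/syncvol", B_FALSE);
}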
diff --git a/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in b/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
index 3c73e2ff2d6d..4cc075585d4b 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
@@ -1,171 +1,195 @@
%define module @PACKAGE@
%if !%{defined ksrc}
%if 0%{?rhel}%{?fedora}%{?openEuler}
%define ksrc ${kernel_version##*___}
%else
%define ksrc "$( \
if [ -e "/usr/src/linux-${kernel_version%%___*}" ]; then \
echo "/usr/src/linux-${kernel_version%%___*}"; \
elif [ -e "/lib/modules/${kernel_version%%___*}/source" ]; then \
echo "/lib/modules/${kernel_version%%___*}/source"; \
else \
echo "/lib/modules/${kernel_version%%___*}/build"; \
fi)"
%endif
%endif
%if !%{defined kobj}
%if 0%{?rhel}%{?fedora}%{?openEuler}
%define kobj ${kernel_version##*___}
%else
%define kobj "$( \
if [ -e "/usr/src/linux-${kernel_version%%___*}" ]; then \
echo "/usr/src/linux-${kernel_version%%___*}"; \
else \
echo "/lib/modules/${kernel_version%%___*}/build"; \
fi)"
%endif
%endif
#define repo rpmfusion
#define repo chaos
# (un)define the next line to either build for the newest or all current kernels
%define buildforkernels newest
#define buildforkernels current
#define buildforkernels akmod
%bcond_with debug
%bcond_with debuginfo
Name: %{module}-kmod
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Kernel module(s)
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
Source0: %{module}-%{version}.tar.gz
Source10: kmodtool
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id} -u -n)
%if 0%{?rhel}%{?fedora}%{?openEuler}
BuildRequires: gcc, make
BuildRequires: elfutils-libelf-devel
%endif
%if (0%{?fedora}%{?suse_version}%{?openEuler}) || (0%{?rhel} && 0%{?rhel} < 9)
# We don't directly use it, but if this isn't installed, rpmbuild as root can
# crash+corrupt rpmdb
# See issue #12071
BuildRequires: ncompress
%endif
# The development headers will conflict with the dkms packages.
Conflicts: %{module}-dkms
%if %{defined repo}
# When building for a repository, use the proper build-sysbuild package
# to determine which kernel-devel packages should be installed.
BuildRequires: %{_bindir}/kmodtool
%{!?kernels:BuildRequires: buildsys-build-%{repo}-kerneldevpkgs-%{?buildforkernels:%{buildforkernels}}%{!?buildforkernels:current}-%{_target_cpu}}
%else
# Building local packages attempts to use the installed kernel.
%{?rhel:BuildRequires: kernel-devel}
%{?fedora:BuildRequires: kernel-devel}
%{?openEuler:BuildRequires: kernel-devel}
%{?suse_version:BuildRequires: kernel-source}
%if !%{defined kernels} && !%{defined build_src_rpm}
%if 0%{?rhel}%{?fedora}%{?suse_version}%{?openEuler}
%define kernels %(ls -1 /usr/src/kernels)
%else
%define kernels %(ls -1 /lib/modules)
%endif
%endif
%endif
# LDFLAGS are not sanitized by arch/*/Makefile for these architectures.
%ifarch ppc ppc64 ppc64le aarch64
%global __global_ldflags %{nil}
%endif
# Kmodtool does its magic here. A patched version of kmodtool is shipped
# with the source rpm until kmod development packages are supported upstream.
# https://bugzilla.rpmfusion.org/show_bug.cgi?id=2714
%{expand:%(bash %{SOURCE10} --target %{_target_cpu} %{?repo:--repo %{?repo}} --kmodname %{name} %{?buildforkernels:--%{buildforkernels}} --devel %{?prefix:--prefix "%{?prefix}"} %{?kernels:--for-kernels "%{?kernels}"} %{?kernelbuildroot:--buildroot "%{?kernelbuildroot}"} 2>/dev/null) }
%description
This package contains the ZFS kernel modules.
%prep
# Error out if there was something wrong with kmodtool.
%{?kmodtool_check}
# Print kmodtool output for debugging purposes:
bash %{SOURCE10} --target %{_target_cpu} %{?repo:--repo %{?repo}} --kmodname %{name} %{?buildforkernels:--%{buildforkernels}} --devel %{?prefix:--prefix "%{?prefix}"} %{?kernels:--for-kernels "%{?kernels}"} %{?kernelbuildroot:--buildroot "%{?kernelbuildroot}"} 2>/dev/null
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
# Leverage VPATH from configure to avoid making multiple copies.
%define _configure ../%{module}-%{version}/configure
%setup -q -c -T -a 0
for kernel_version in %{?kernel_versions}; do
%{__mkdir} _kmod_build_${kernel_version%%___*}
done
%build
for kernel_version in %{?kernel_versions}; do
cd _kmod_build_${kernel_version%%___*}
%configure \
--with-config=kernel \
--with-linux=%{ksrc} \
--with-linux-obj=%{kobj} \
%{debug} \
%{debuginfo} \
%{?kernel_cc} \
%{?kernel_ld} \
%{?kernel_llvm}
make %{?_smp_mflags}
cd ..
done
+# Module signing (modsign)
+#
+# This must be run _after_ find-debuginfo.sh runs, otherwise it will strip
+# the signatures from the modules.
+# (Based on Fedora's kernel.spec workaround)
+%define __modsign_install_post \
+ sign_pem="%{ksrc}/certs/signing_key.pem"; \
+ sign_x509="%{ksrc}/certs/signing_key.x509"; \
+ if [ -f "${sign_x509}" ]\
+ then \
+ echo "Signing kernel modules ..."; \
+ for kmod in $(find ${RPM_BUILD_ROOT}%{kmodinstdir_prefix}/*/extra/ -name \*.ko); do \
+ %{ksrc}/scripts/sign-file sha256 ${sign_pem} ${sign_x509} ${kmod}; \
+ done \
+ fi \
+%{nil}
+
+# hack to ensure signing happens after find-debuginfo.sh runs
+%define __spec_install_post \
+ %{?__debug_package:%{__debug_install_post}}\
+ %{__arch_install_post}\
+ %{__os_install_post}\
+ %{__modsign_install_post}
+
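+# Note (illustration only): after installation, `modinfo <module>.ko` reports
+# "signer:", "sig_key:" and "sig_hashalgo:" fields when the signing step
+# above succeeded.
+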
%install
rm -rf ${RPM_BUILD_ROOT}
# Relies on the kernel 'modules_install' make target.
for kernel_version in %{?kernel_versions}; do
cd _kmod_build_${kernel_version%%___*}
make install \
DESTDIR=${RPM_BUILD_ROOT} \
%{?prefix:INSTALL_MOD_PATH=%{?prefix}} \
INSTALL_MOD_DIR=%{kmodinstdir_postfix}
cd ..
done
# find-debuginfo.sh only considers executables
chmod u+x ${RPM_BUILD_ROOT}%{kmodinstdir_prefix}/*/extra/*/*
%{?akmod_install}
%clean
rm -rf $RPM_BUILD_ROOT
diff --git a/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in b/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
index f59551c0b43a..9c836786baea 100644
--- a/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
+++ b/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
@@ -1,88 +1,112 @@
%bcond_with debug
%bcond_with debuginfo
Name: @PACKAGE@-kmod
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Kernel module(s)
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
BuildRequires: %kernel_module_package_buildreqs
Source0: @PACKAGE@-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
# Additional dependency information for the kmod sub-package must be specified
# by generating a preamble text file which kmodtool can append to the spec file.
%(/bin/echo -e "\
Requires: @PACKAGE@ = %{version}\n\
Conflicts: @PACKAGE@-dkms)
# LDFLAGS are not sanitized by arch/*/Makefile for these architectures.
%ifarch ppc ppc64 ppc64le aarch64
%global __global_ldflags %{nil}
%endif
%description
This package contains the ZFS kernel modules.
%define kmod_name @PACKAGE@
%kernel_module_package -n %{kmod_name} -p %{_sourcedir}/kmod-preamble
%define ksrc %{_usrsrc}/kernels/%{kverrel}
%define kobj %{ksrc}
%package -n kmod-%{kmod_name}-devel
Summary: ZFS kernel module(s) devel common
Group: System Environment/Kernel
%description -n kmod-%{kmod_name}-devel
This package provides the header files and objects to build kernel modules.
%prep
if ! [ -d "%{ksrc}" ]; then
echo "Kernel build directory isn't set properly, cannot continue"
exit 1
fi
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
%setup -n %{kmod_name}-%{version}
%build
%configure \
--with-config=kernel \
--with-linux=%{ksrc} \
--with-linux-obj=%{kobj} \
%{debug} \
%{debuginfo} \
%{?kernel_cc} \
%{?kernel_ld} \
%{?kernel_llvm}
make %{?_smp_mflags}
+# Module signing (modsign)
+#
+# This must be run _after_ find-debuginfo.sh runs, otherwise it will strip
+# the signatures from the modules.
+# (Based on Fedora's kernel.spec workaround)
+%define __modsign_install_post \
+ sign_pem="%{ksrc}/certs/signing_key.pem"; \
+ sign_x509="%{ksrc}/certs/signing_key.x509"; \
+ if [ -f "${sign_x509}" ]\
+ then \
+ echo "Signing kernel modules ..."; \
+ for kmod in $(find %{buildroot}/lib/modules/%{kverrel}/extra/ -name \*.ko); do \
+ %{ksrc}/scripts/sign-file sha256 ${sign_pem} ${sign_x509} ${kmod}; \
+ done \
+ fi \
+%{nil}
+
+# hack to ensure signing happens after find-debuginfo.sh runs
+%define __spec_install_post \
+ %{?__debug_package:%{__debug_install_post}}\
+ %{__arch_install_post}\
+ %{__os_install_post}\
+ %{__modsign_install_post}
+
%install
make install \
DESTDIR=${RPM_BUILD_ROOT} \
INSTALL_MOD_DIR=extra/%{kmod_name}
%{__rm} -f %{buildroot}/lib/modules/%{kverrel}/modules.*
# find-debuginfo.sh only considers executables
%{__chmod} u+x %{buildroot}/lib/modules/%{kverrel}/extra/*/*
%clean
rm -rf $RPM_BUILD_ROOT
%files -n kmod-%{kmod_name}-devel
%{_usrsrc}/%{kmod_name}-%{version}
diff --git a/sys/contrib/openzfs/tests/Makefile.am b/sys/contrib/openzfs/tests/Makefile.am
index 2e633041ab59..12e9c9f9daf2 100644
--- a/sys/contrib/openzfs/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/Makefile.am
@@ -1,30 +1,31 @@
include $(srcdir)/%D%/zfs-tests/Makefile.am
scripts_test_runner_bindir = $(datadir)/$(PACKAGE)/test-runner/bin
scripts_test_runner_bin_SCRIPTS = \
%D%/test-runner/bin/test-runner.py \
%D%/test-runner/bin/zts-report.py
SUBSTFILES += $(scripts_test_runner_bin_SCRIPTS)
scripts_test_runner_includedir = $(datadir)/$(PACKAGE)/test-runner/include
dist_scripts_test_runner_include_DATA = \
%D%/test-runner/include/logapi.shlib
scripts_runfilesdir = $(datadir)/$(PACKAGE)/runfiles
dist_scripts_runfiles_DATA = \
+ %D%/runfiles/bclone.run \
%D%/runfiles/common.run \
%D%/runfiles/freebsd.run \
%D%/runfiles/linux.run \
%D%/runfiles/longevity.run \
%D%/runfiles/perf-regression.run \
%D%/runfiles/sanity.run \
%D%/runfiles/sunos.run
dist_noinst_DATA += %D%/README.md
SHELLCHECKSCRIPTS += $(shell find $(srcdir)/%D% -name '*.sh')
diff --git a/sys/contrib/openzfs/tests/runfiles/bclone.run b/sys/contrib/openzfs/tests/runfiles/bclone.run
new file mode 100644
index 000000000000..3d0f545d9226
--- /dev/null
+++ b/sys/contrib/openzfs/tests/runfiles/bclone.run
@@ -0,0 +1,46 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# This run file contains the block cloning (bclone) functional tests. When
+# adding a new test, consider also adding it to the sanity.run file
+# if the new test runs to completion in only a few seconds.
+#
+# Approximate run time: 5 hours
+#
+
+[DEFAULT]
+pre = setup
+quiet = False
+pre_user = root
+user = root
+timeout = 28800
+post_user = root
+post = cleanup
+failsafe_user = root
+failsafe = callbacks/zfs_failsafe
+outputdir = /var/tmp/test_results
+tags = ['bclone']
+
+[tests/functional/bclone]
+tests = ['bclone_crossfs_corner_cases',
+ 'bclone_crossfs_data',
+ 'bclone_crossfs_embedded',
+ 'bclone_crossfs_hole',
+ 'bclone_diffprops_all',
+ 'bclone_diffprops_checksum',
+ 'bclone_diffprops_compress',
+ 'bclone_diffprops_copies',
+ 'bclone_diffprops_recordsize',
+ 'bclone_prop_sync',
+ 'bclone_samefs_corner_cases',
+ 'bclone_samefs_data',
+ 'bclone_samefs_embedded',
+ 'bclone_samefs_hole']
+tags = ['bclone']
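+
+# Illustration only: select this runfile via the test suite's runfile
+# option, e.g. `scripts/zfs-tests.sh -r bclone`.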
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index ef787c65c0f9..7331244515f6 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1,983 +1,1017 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains all of the common functional tests. When
# adding a new test, consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['dosmode', 'posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
'alloc_class_013_pos', 'alloc_class_014_neg', 'alloc_class_015_pos']
tags = ['functional', 'alloc_class']
[tests/functional/append]
tests = ['file_append', 'threadsappend_001_pos']
tags = ['functional', 'append']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']
+[tests/functional/bclone]
+tests = ['bclone_crossfs_corner_cases_limited',
+ 'bclone_crossfs_data',
+ 'bclone_crossfs_embedded',
+ 'bclone_crossfs_hole',
+ 'bclone_diffprops_all',
+ 'bclone_diffprops_checksum',
+ 'bclone_diffprops_compress',
+ 'bclone_diffprops_copies',
+ 'bclone_diffprops_recordsize',
+ 'bclone_prop_sync',
+ 'bclone_samefs_corner_cases_limited',
+ 'bclone_samefs_data',
+ 'bclone_samefs_embedded',
+ 'bclone_samefs_hole']
+tags = ['functional', 'bclone']
+timeout = 7200
+
+[tests/functional/block_cloning]
+tests = ['block_cloning_clone_mmap_cached',
+ 'block_cloning_copyfilerange',
+ 'block_cloning_copyfilerange_partial',
+ 'block_cloning_copyfilerange_fallback',
+ 'block_cloning_disabled_copyfilerange',
+ 'block_cloning_copyfilerange_cross_dataset',
+ 'block_cloning_cross_enc_dataset',
+ 'block_cloning_copyfilerange_fallback_same_txg',
+ 'block_cloning_replay', 'block_cloning_replay_encrypted',
+ 'block_cloning_lwb_buffer_overflow', 'block_cloning_clone_mmap_write']
+tags = ['functional', 'block_cloning']
+
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg', 'tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_rename', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy',
'tst.terminate_by_signal'
]
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'run_blake3_test',
'filetest_001_pos', 'filetest_002_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = ['clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
'zdb_display_block', 'zdb_encrypted', 'zdb_label_checksum',
'zdb_object_range_neg', 'zdb_object_range_pos', 'zdb_objset_id',
'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2', 'zdb_backup']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
+timeout = 1200
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested',
'zfs_clone_rm_nested']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted', 'zfs_diff_mangle']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override',
'receive-o-x_props_aliases',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props',
'zfs_receive_-wR-encrypted-mix', 'zfs_receive_corrective',
'zfs_receive_compressed_corrective', 'zfs_receive_large_block_corrective']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_encrypted_unloaded',
'zfs_send_raw', 'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
'zfs_set_feature_activation', 'zfs_set_nomount']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
- 'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
+ 'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares',
+ 'zfs_share_after_mount']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
'zfs_unshare_007_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq', 'zfs_wait_getsubopt']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zhack]
tests = ['zhack_label_repair_001', 'zhack_label_repair_002',
'zhack_label_repair_003', 'zhack_label_repair_004']
pre =
post =
tags = ['functional', 'cli_root', 'zhack']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_023_neg', 'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
'zpool_create_features_009_pos', 'create-o_ashift',
'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos', 'vdev_get_001_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3', 'zpool_import_errata4',
'import_cachefile_device_added',
'import_cachefile_device_removed',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing', 'import_log_missing',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
'zpool_initialize_fault_export_import_online',
'zpool_initialize_import_export',
'zpool_initialize_offline_export_import_online',
'zpool_initialize_online_offline',
'zpool_initialize_split',
'zpool_initialize_start_and_cancel_neg',
'zpool_initialize_start_and_cancel_pos',
'zpool_initialize_suspend_resume',
'zpool_initialize_uninit',
'zpool_initialize_unsupported_vdevs',
'zpool_initialize_verify_checksums',
'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart',
'zpool_resilver_concurrent']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies',
'zpool_error_scrub_001_pos', 'zpool_error_scrub_002_pos',
'zpool_error_scrub_003_pos', 'zpool_error_scrub_004_pos']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features', 'vdev_set_001_pos',
'user_property_001_pos', 'user_property_002_neg']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
'zpool_split_resilver', 'zpool_split_indirect',
'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
'zpool_status_003_pos', 'zpool_status_004_pos',
'zpool_status_005_pos', 'zpool_status_006_pos',
- 'zpool_status_007_pos', 'zpool_status_features_001_pos']
+ 'zpool_status_007_pos', 'zpool_status_008_pos',
+ 'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
'zpool_trim_fault_export_import_online',
'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_005_neg', 'zfs_list_007_pos',
'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']
[tests/functional/cp_files]
-tests = ['cp_files_001_pos']
+tests = ['cp_files_001_pos', 'cp_files_002_pos', 'cp_stress']
tags = ['functional', 'cp_files']
[tests/functional/crtime]
tests = ['crtime_001_pos']
tags = ['functional', 'crtime']
[tests/functional/ctime]
tests = ['ctime_001_pos']
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/fallocate]
tests = ['fallocate_punch-hole']
tags = ['functional', 'fallocate']
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
pre =
post =
tests = ['hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
'snapshot_limit']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_mixed', 'mmap_read_001_pos', 'mmap_seek_001_pos',
'mmap_sync_001_pos', 'mmap_write_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
'enospc_df', 'enospc_ganging', 'enospc_rm']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
'checkpoint_discard_busy', 'checkpoint_discard_many',
'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
'redundancy_draid3', 'redundancy_draid_damaged1',
'redundancy_draid_damaged2', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
'refreserv_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
'removal_condense_export', 'removal_multiple_indirection',
'removal_nopwrite', 'removal_remap_deadlists',
'removal_resume_export', 'removal_sanity', 'removal_with_add',
'removal_with_create_fs', 'removal_with_dedup',
'removal_with_errors', 'removal_with_export', 'removal_with_indirect',
'removal_with_ganging', 'removal_with_faulted',
'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
'removal_with_send_recv', 'removal_with_snapshot',
'removal_with_write', 'removal_with_zdb', 'remove_expanded',
'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
'remove_indirect', 'remove_attach_mirror', 'removal_reservation']
tags = ['functional', 'removal']
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
'attach_resilver', 'detach', 'rebuild_disabled_feature',
'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos', 'rsend_025_pos',
'rsend_026_neg', 'rsend_027_pos', 'rsend_028_neg', 'rsend_029_neg',
'rsend_030_pos', 'rsend_031_pos', 'send-c_verify_ratio',
'send-c_verify_contents', 'send-c_props', 'send-c_incremental',
'send-c_volume', 'send-c_zstream_recompress', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_incremental',
'send_encrypted_freeobjects', 'send_encrypted_hierarchy',
'send_encrypted_props', 'send_encrypted_truncated_files',
'send_freeobjects', 'send_realloc_files', 'send_realloc_encrypted_files',
'send_spill_block', 'send_holds', 'send_hole_birth', 'send_mixed_raw',
'send-wR_encrypted_zvol', 'send_partial_dataset', 'send_invalid',
'send_doall', 'send_raw_spill_block', 'send_raw_ashift',
'send_raw_large_blocks']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
'slog_replay_fs_002', 'slog_replay_volume', 'slog_016_pos']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_017_pos', 'snapshot_018_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/stat]
tests = ['stat_001_pos']
tags = ['functional', 'stat']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none', 'suid_write_zil_replay']
tags = ['functional', 'suid']
[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
'userspace_send_encrypted', 'userspace_encrypted_13709']
tags = ['functional', 'userquota']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
'zvol_misc_snapdev', 'zvol_misc_trim', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_stress]
tests = ['zvol_stress']
tags = ['functional', 'zvol', 'zvol_stress']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index 8bc55a1b4b47..6a4cd3fe691c 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,228 +1,223 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
[tests/functional/block_cloning:Linux]
-tests = ['block_cloning_copyfilerange', 'block_cloning_copyfilerange_partial',
- 'block_cloning_copyfilerange_fallback',
- 'block_cloning_ficlone', 'block_cloning_ficlonerange',
- 'block_cloning_ficlonerange_partial',
- 'block_cloning_disabled_copyfilerange', 'block_cloning_disabled_ficlone',
- 'block_cloning_disabled_ficlonerange',
- 'block_cloning_copyfilerange_cross_dataset',
- 'block_cloning_copyfilerange_fallback_same_txg']
+tests = ['block_cloning_ficlone', 'block_cloning_ficlonerange',
+ 'block_cloning_ficlonerange_partial', 'block_cloning_disabled_ficlone',
+ 'block_cloning_disabled_ficlonerange']
tags = ['functional', 'block_cloning']
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos', 'zfs_share_013_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_unshare:Linux]
tests = ['zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_import:Linux]
tests = ['zpool_import_hostid_changed',
'zpool_import_hostid_changed_unclean_export',
'zpool_import_hostid_changed_cachefile',
'zpool_import_hostid_changed_cachefile_unclean_export']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill',
'zed_cksum_reported', 'zed_cksum_config', 'zed_io_config']
tags = ['functional', 'events']
[tests/functional/fadvise:Linux]
tests = ['fadvise_sequential']
tags = ['functional', 'fadvise']
[tests/functional/fallocate:Linux]
tests = ['fallocate_prealloc', 'fallocate_zero-range']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_replace_002_pos', 'auto_spare_001_pos',
'auto_spare_002_pos', 'auto_spare_multiple', 'auto_spare_ashift',
'auto_spare_shared', 'decrypt_fault', 'decompress_fault',
'scrub_after_resilver', 'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
[tests/functional/largest_pool:Linux]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos', 'mmap_sync_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/dos_attributes:Linux]
tests = ['read_dos_attrs_001', 'write_dos_attrs_001']
tags = ['functional', 'dos_attributes']
[tests/functional/renameat2:Linux]
tests = ['renameat2_noreplace', 'renameat2_exchange', 'renameat2_whiteout']
tags = ['functional', 'renameat2']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files']
tags = ['functional', 'rsend']
[tests/functional/simd:Linux]
pre =
post =
tests = ['simd_supported']
tags = ['functional', 'simd']
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001', 'user_namespace_002', 'user_namespace_003',
'user_namespace_004']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
[tests/functional/zvol/zvol_misc:Linux]
tests = ['zvol_misc_fua']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/idmap_mount:Linux]
tests = ['idmap_mount_001', 'idmap_mount_002', 'idmap_mount_003',
'idmap_mount_004', 'idmap_mount_005']
tags = ['functional', 'idmap_mount']
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index 4608e87522a3..ecc50f487152 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,476 +1,525 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 3.6+.
#
import os
import re
import sys
import argparse
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require that the kernel supports renameat2 syscall.
#
renameat2_reason = 'Kernel renameat2 support required'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require the statx(2) system call on Linux which was first
# introduced in the 4.11 kernel.
#
statx_reason = 'Kernel statx(2) system call required on Linux'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loopback devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
#
# Some tests on FreeBSD require the fspacectl(2) system call and the
# truncate(1) utility supporting the -d option. The system call was first
# introduced in FreeBSD version 1400032.
#
fspacectl_reason = 'fspacectl(2) and truncate -d support required'
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
#
# Idmapped mount is only supported in kernel version >= 5.12
#
idmap_reason = 'Idmapped mount needs kernel 5.12+'
#
# copy_file_range() is not supported by all kernels
#
cfr_reason = 'Kernel copy_file_range support required'
-cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs kernel 5.3+'
+
+if sys.platform.startswith('freebsd'):
+ cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs FreeBSD 14+'
+else:
+ cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs kernel 5.3+'
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', 7633],
'casenorm/mixed_formd_lookup_ci': ['FAIL', 7633],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'pool_checkpoint/checkpoint_discard_busy': ['SKIP', 12053],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', 6066],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
'cli_root/zpool_resilver/zpool_resilver_concurrent':
['SKIP', na_reason],
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'cli_root/zfs_unshare/zfs_unshare_008_pos': ['SKIP', na_reason],
+ 'cp_files/cp_files_002_pos': ['SKIP', na_reason],
'link_count/link_count_001': ['SKIP', na_reason],
'casenorm/mixed_create_failure': ['FAIL', 13215],
'mmap/mmap_sync_001_pos': ['SKIP', na_reason],
'rsend/send_raw_ashift': ['SKIP', 14961],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', 7633],
'casenorm/mixed_formd_delete': ['FAIL', 7633],
'casenorm/sensitive_formd_lookup': ['FAIL', 7633],
'casenorm/sensitive_formd_delete': ['FAIL', 7633],
'removal/removal_with_zdb': ['SKIP', known_reason],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
})
#
# These tests may occasionally fail or be skipped. We want their failures
# to be reported, but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
'append/threadsappend_001_pos': ['FAIL', 6136],
'chattr/setup': ['SKIP', exec_reason],
'crtime/crtime_001_pos': ['SKIP', statx_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', 5479],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares': ['FAIL', known_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', 6145],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', 6839],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', 11948],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', 6141],
'delegate/setup': ['SKIP', exec_reason],
'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason],
'history/history_004_pos': ['FAIL', 7026],
'history/history_005_neg': ['FAIL', 6680],
'history/history_006_neg': ['FAIL', 5657],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
    'pam/setup': ['SKIP', "pamtester might not be available"],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', 11946],
'projectquota/setup': ['SKIP', exec_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'renameat2/setup': ['SKIP', renameat2_reason],
'reservation/reservation_008_pos': ['FAIL', 7741],
'reservation/reservation_018_pos': ['FAIL', 5642],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', 7961],
'snapshot/snapshot_010_pos': ['FAIL', 7961],
'snapused/snapused_004_pos': ['FAIL', 5513],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', known_reason],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', 5848],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', 11829],
- 'resilver/resilver_restart_001': ['FAIL', known_reason],
'pool_checkpoint/checkpoint_big_rewind': ['FAIL', 12622],
'pool_checkpoint/checkpoint_indirect': ['FAIL', 12623],
+ 'resilver/resilver_restart_001': ['FAIL', known_reason],
'snapshot/snapshot_002_pos': ['FAIL', '14831'],
+ 'bclone/bclone_crossfs_corner_cases': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_corner_cases_limited':
+ ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_data': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_embedded': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_hole': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_all': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_checksum': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_compress': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_copies': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_recordsize': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_prop_sync': ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_cross_enc_dataset':
+ ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_copyfilerange_cross_dataset':
+ ['SKIP', cfr_cross_reason]
})
elif sys.platform.startswith('linux'):
maybe.update({
+ 'bclone/bclone_crossfs_corner_cases': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_corner_cases_limited':
+ ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_data': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_embedded': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_crossfs_hole': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_all': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_checksum': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_compress': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_copies': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_diffprops_recordsize': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_prop_sync': ['SKIP', cfr_cross_reason],
+ 'bclone/bclone_samefs_corner_cases': ['SKIP', cfr_reason],
+ 'bclone/bclone_samefs_corner_cases_limited': ['SKIP', cfr_reason],
+ 'bclone/bclone_samefs_data': ['SKIP', cfr_reason],
+ 'bclone/bclone_samefs_embedded': ['SKIP', cfr_reason],
+ 'bclone/bclone_samefs_hole': ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_clone_mmap_cached': ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_clone_mmap_write':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange_cross_dataset':
+ ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_copyfilerange_fallback':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_copyfilerange_fallback_same_txg':
+ ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_copyfilerange_partial':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_cross_enc_dataset':
+ ['SKIP', cfr_cross_reason],
+ 'block_cloning/block_cloning_disabled_copyfilerange':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_lwb_buffer_overflow':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_replay':
+ ['SKIP', cfr_reason],
+ 'block_cloning/block_cloning_replay_encrypted':
+ ['SKIP', cfr_reason],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
+ 'cp_files/cp_files_002_pos': ['SKIP', cfr_reason],
'fault/auto_online_002_pos': ['FAIL', 11889],
'fault/auto_replace_001_pos': ['FAIL', 14851],
'fault/auto_spare_002_pos': ['FAIL', 11889],
'fault/auto_spare_multiple': ['FAIL', 11889],
'fault/auto_spare_shared': ['FAIL', 11889],
'fault/decompress_fault': ['FAIL', 11889],
+ 'idmap_mount/idmap_mount_001': ['SKIP', idmap_reason],
+ 'idmap_mount/idmap_mount_002': ['SKIP', idmap_reason],
+ 'idmap_mount/idmap_mount_003': ['SKIP', idmap_reason],
+ 'idmap_mount/idmap_mount_004': ['SKIP', idmap_reason],
+ 'idmap_mount/idmap_mount_005': ['SKIP', idmap_reason],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
- 'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', 12621],
- 'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_fua': ['SKIP', 14872],
+ 'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', 12621],
'zvol/zvol_misc/zvol_misc_trim': ['SKIP', 14872],
- 'idmap_mount/idmap_mount_001': ['SKIP', idmap_reason],
- 'idmap_mount/idmap_mount_002': ['SKIP', idmap_reason],
- 'idmap_mount/idmap_mount_003': ['SKIP', idmap_reason],
- 'idmap_mount/idmap_mount_004': ['SKIP', idmap_reason],
- 'idmap_mount/idmap_mount_005': ['SKIP', idmap_reason],
- 'block_cloning/block_cloning_disabled_copyfilerange':
- ['SKIP', cfr_reason],
- 'block_cloning/block_cloning_copyfilerange':
- ['SKIP', cfr_reason],
- 'block_cloning/block_cloning_copyfilerange_partial':
- ['SKIP', cfr_reason],
- 'block_cloning/block_cloning_copyfilerange_fallback':
- ['SKIP', cfr_reason],
- 'block_cloning/block_cloning_copyfilerange_cross_dataset':
- ['SKIP', cfr_cross_reason],
- 'block_cloning/block_cloning_copyfilerange_fallback_same_txg':
- ['SKIP', cfr_cross_reason],
+ 'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
})
-
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_replace_002_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', 11546],
})
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file:', e)
sys.exit(1)
prefix = '/zfs-tests/tests/(?:functional|perf/regression)/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
rf'\s*\S*{prefix}(\S+)' + \
r'\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]'
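    # For example (hypothetical path), a result line such as
    #   Test: /var/tmp/zfs-tests/tests/functional/xattr/xattr_001_pos (run as root) [00:01] [PASS]
    # matches with group(1) == 'xattr/xattr_001_pos' and group(4) == 'PASS'.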
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
logdir = 'Could not determine log directory.'
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
logdir = m.group(1)
return d, logdir
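# Custom argparse action: --list-maybes takes no argument (nargs=0),
# prints the name of every test in "maybe", and exits immediately.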
class ListMaybesAction(argparse.Action):
def __init__(self,
option_strings,
dest="SUPPRESS",
default="SUPPRESS",
help="list flaky tests and exit"):
super(ListMaybesAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
for test in maybe:
print(test)
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze ZTS logs')
parser.add_argument('logfile')
parser.add_argument('--list-maybes', action=ListMaybesAction)
parser.add_argument('--no-maybes', action='store_false', dest='maybes')
args = parser.parse_args()
results, logdir = process_results(args.logfile)
if not results:
print("\n\nNo test results were found.")
print("Log directory:", logdir)
sys.exit(0)
expected = []
unexpected = []
all_maybes = True
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if (test in known and results[test] in known[test][0]):
expected.append(test)
elif test in maybe and results[test] in maybe[test][0]:
if results[test] == 'SKIP' or args.maybes:
expected.append(test)
elif not args.maybes:
unexpected.append(test)
else:
unexpected.append(test)
all_maybes = False
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif isinstance(known[test][1], int):
expect = f"{issue_url}{known[test][1]}"
else:
expect = known[test][1]
elif test in maybe:
if isinstance(maybe[test][1], int):
expect = f"{issue_url}{maybe[test][1]}"
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(f" {results[test]} {test} ({expect})")
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(f" {results[test]} {test} (expected {known[test][0]})")
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(f" {results[test]} {test} (expected {expect})")
if len(unexpected) == 0:
sys.exit(0)
elif not args.maybes and all_maybes:
sys.exit(2)
else:
sys.exit(1)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/Makefile.am
index f8166352489e..3dd1a6452728 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/Makefile.am
@@ -1,48 +1,51 @@
SUBDIRS += %D%/tests
include $(srcdir)/%D%/cmd/Makefile.am
scripts_zfs_tests_functional_libzfsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests/functional/libzfs
scripts_zfs_tests_functional_libzfs_PROGRAMS = %D%/tests/functional/libzfs/many_fds
%C%_tests_functional_libzfs_many_fds_LDADD = \
libzfs.la
scripts_zfs_tests_functional_hkdfdir = $(datadir)/$(PACKAGE)/zfs-tests/tests/functional/hkdf
scripts_zfs_tests_functional_hkdf_PROGRAMS = %D%/tests/functional/hkdf/hkdf_test
%C%_tests_functional_hkdf_hkdf_test_LDADD = \
libzpool.la
+scripts_zfs_tests_functional_cp_filesdir = $(datadir)/$(PACKAGE)/zfs-tests/tests/functional/cp_files
+scripts_zfs_tests_functional_cp_files_PROGRAMS = %D%/tests/functional/cp_files/seekflood
+
if BUILD_LINUX
scripts_zfs_tests_functional_tmpfiledir = $(datadir)/$(PACKAGE)/zfs-tests/tests/functional/tmpfile
scripts_zfs_tests_functional_tmpfile_PROGRAMS = \
%D%/tests/functional/tmpfile/tmpfile_001_pos \
%D%/tests/functional/tmpfile/tmpfile_002_pos \
%D%/tests/functional/tmpfile/tmpfile_003_pos \
%D%/tests/functional/tmpfile/tmpfile_stat_mode \
%D%/tests/functional/tmpfile/tmpfile_test
endif
scripts_zfs_tests_callbacksdir = $(datadir)/$(PACKAGE)/zfs-tests/callbacks
dist_scripts_zfs_tests_callbacks_SCRIPTS = \
%D%/callbacks/zfs_dbgmsg.ksh \
%D%/callbacks/zfs_dmesg.ksh \
%D%/callbacks/zfs_failsafe.ksh \
%D%/callbacks/zfs_mmp.ksh
scripts_zfs_tests_includedir = $(datadir)/$(PACKAGE)/zfs-tests/include
dist_scripts_zfs_tests_include_DATA = \
%D%/include/blkdev.shlib \
%D%/include/commands.cfg \
%D%/include/libtest.shlib \
%D%/include/math.shlib \
%D%/include/properties.shlib \
%D%/include/tunables.cfg \
%D%/include/zpool_script.shlib
nodist_scripts_zfs_tests_include_DATA = \
%D%/include/default.cfg
SUBSTFILES += $(nodist_scripts_zfs_tests_include_DATA)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
index 5f53b687191a..0ed0a69eb013 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
@@ -1,52 +1,54 @@
/badsend
/btree_test
/chg_usr_exec
/clonefile
+/clone_mmap_cached
+/clone_mmap_write
/devname2devid
/dir_rd_update
/draid
/file_fadvise
/file_append
/file_check
/file_trunc
/file_write
/get_diff
/getversion
/largest_file
/libzfs_input_check
/mkbusy
/mkfile
/mkfiles
/mktree
/mmap_exec
/mmap_libaio
/mmap_seek
/mmap_sync
/mmapwrite
/nvlist_to_lua
/randfree_file
/randwritecomp
/read_dos_attributes
/readmmap
/renameat2
/rename_dir
/rm_lnkcnt_zero_file
/send_doall
/stride_dd
/threadsappend
/user_ns_exec
/write_dos_attributes
/xattrtest
/zed_fd_spill-zedlet
/suid_write_to_file
/cp_files
/ctime
/truncate_test
/ereports
/zfs_diff-socket
/dosmode_readonly_write
/blake3_test
/edonr_test
/skein_test
/sha2_test
/idmap_util
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
index 9bdb3c209756..23848a82ffbd 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
@@ -1,136 +1,138 @@
scripts_zfs_tests_bindir = $(datadir)/$(PACKAGE)/zfs-tests/bin
scripts_zfs_tests_bin_PROGRAMS = %D%/chg_usr_exec
+scripts_zfs_tests_bin_PROGRAMS += %D%/clonefile
+scripts_zfs_tests_bin_PROGRAMS += %D%/clone_mmap_cached
+scripts_zfs_tests_bin_PROGRAMS += %D%/clone_mmap_write
scripts_zfs_tests_bin_PROGRAMS += %D%/cp_files
scripts_zfs_tests_bin_PROGRAMS += %D%/ctime
scripts_zfs_tests_bin_PROGRAMS += %D%/dir_rd_update
scripts_zfs_tests_bin_PROGRAMS += %D%/dosmode_readonly_write
scripts_zfs_tests_bin_PROGRAMS += %D%/get_diff
scripts_zfs_tests_bin_PROGRAMS += %D%/rename_dir
scripts_zfs_tests_bin_PROGRAMS += %D%/suid_write_to_file
scripts_zfs_tests_bin_PROGRAMS += %D%/truncate_test
scripts_zfs_tests_bin_PROGRAMS += %D%/zfs_diff-socket
scripts_zfs_tests_bin_PROGRAMS += %D%/badsend
%C%_badsend_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/btree_test
%C%_btree_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
%C%_btree_test_LDADD = \
libzpool.la \
libzfs_core.la
if WANT_DEVNAME2DEVID
scripts_zfs_tests_bin_PROGRAMS += %D%/devname2devid
%C%_devname2devid_CFLAGS = $(AM_CFLAGS) $(LIBUDEV_CFLAGS)
%C%_devname2devid_LDADD = $(LIBUDEV_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/draid
%C%_draid_CFLAGS = $(AM_CFLAGS) $(ZLIB_CFLAGS)
%C%_draid_LDADD = \
libzpool.la \
libnvpair.la
%C%_draid_LDADD += $(ZLIB_LIBS)
dist_noinst_DATA += %D%/file/file_common.h
scripts_zfs_tests_bin_PROGRAMS += %D%/file_append %D%/file_check %D%/file_trunc %D%/file_write %D%/largest_file %D%/randwritecomp
%C%_file_append_SOURCES = %D%/file/file_append.c
%C%_file_check_SOURCES = %D%/file/file_check.c
%C%_file_trunc_SOURCES = %D%/file/file_trunc.c
%C%_file_write_SOURCES = %D%/file/file_write.c
%C%_largest_file_SOURCES = %D%/file/largest_file.c
%C%_randwritecomp_SOURCES = %D%/file/randwritecomp.c
scripts_zfs_tests_bin_PROGRAMS += %D%/libzfs_input_check
%C%_libzfs_input_check_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/include/os/@ac_system_l@/zfs
%C%_libzfs_input_check_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/mkbusy %D%/mkfile %D%/mkfiles %D%/mktree
%C%_mkfile_LDADD = $(LTLIBINTL)
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_exec %D%/mmap_seek %D%/mmap_sync %D%/mmapwrite %D%/readmmap
%C%_mmapwrite_LDADD = -lpthread
if WANT_MMAP_LIBAIO
scripts_zfs_tests_bin_PROGRAMS += %D%/mmap_libaio
%C%_mmap_libaio_CFLAGS = $(AM_CFLAGS) $(LIBAIO_CFLAGS)
%C%_mmap_libaio_LDADD = $(LIBAIO_LIBS)
endif
scripts_zfs_tests_bin_PROGRAMS += %D%/nvlist_to_lua
%C%_nvlist_to_lua_LDADD = \
libzfs_core.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/rm_lnkcnt_zero_file
%C%_rm_lnkcnt_zero_file_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/send_doall
%C%_send_doall_LDADD = \
libzfs_core.la \
libzfs.la \
libnvpair.la
scripts_zfs_tests_bin_PROGRAMS += %D%/stride_dd
%C%_stride_dd_LDADD = -lrt
scripts_zfs_tests_bin_PROGRAMS += %D%/threadsappend
%C%_threadsappend_LDADD = -lpthread
scripts_zfs_tests_bin_PROGRAMS += %D%/ereports
%C%_ereports_LDADD = \
libnvpair.la \
libzfs.la
scripts_zfs_tests_bin_PROGRAMS += %D%/edonr_test %D%/skein_test \
%D%/sha2_test %D%/blake3_test
%C%_skein_test_SOURCES = %D%/checksum/skein_test.c
%C%_sha2_test_SOURCES = %D%/checksum/sha2_test.c
%C%_edonr_test_SOURCES = %D%/checksum/edonr_test.c
%C%_blake3_test_SOURCES = %D%/checksum/blake3_test.c
%C%_skein_test_LDADD = \
libicp.la \
libspl.la \
libspl_assert.la
%C%_sha2_test_LDADD = $(%C%_skein_test_LDADD)
%C%_edonr_test_LDADD = $(%C%_skein_test_LDADD)
%C%_blake3_test_LDADD = $(%C%_skein_test_LDADD)
if BUILD_LINUX
scripts_zfs_tests_bin_PROGRAMS += %D%/getversion
scripts_zfs_tests_bin_PROGRAMS += %D%/user_ns_exec
scripts_zfs_tests_bin_PROGRAMS += %D%/renameat2
scripts_zfs_tests_bin_PROGRAMS += %D%/xattrtest
scripts_zfs_tests_bin_PROGRAMS += %D%/zed_fd_spill-zedlet
scripts_zfs_tests_bin_PROGRAMS += %D%/idmap_util
-scripts_zfs_tests_bin_PROGRAMS += %D%/clonefile
%C%_idmap_util_LDADD = libspl.la
dist_noinst_DATA += %D%/linux_dos_attributes/dos_attributes.h
scripts_zfs_tests_bin_PROGRAMS += %D%/read_dos_attributes %D%/write_dos_attributes
%C%_read_dos_attributes_SOURCES = %D%/linux_dos_attributes/read_dos_attributes.c
%C%_write_dos_attributes_SOURCES = %D%/linux_dos_attributes/write_dos_attributes.c
scripts_zfs_tests_bin_PROGRAMS += %D%/randfree_file
%C%_randfree_file_SOURCES = %D%/file/randfree_file.c
scripts_zfs_tests_bin_PROGRAMS += %D%/file_fadvise
%C%_file_fadvise_SOURCES = %D%/file/file_fadvise.c
endif
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_cached.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_cached.c
new file mode 100644
index 000000000000..c1cdf796cfb4
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_cached.c
@@ -0,0 +1,146 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2024 by Pawel Jakub Dawidek
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifdef __FreeBSD__
+#define loff_t off_t
+#endif
+
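+/*
+ * Declared weak so this test still links on systems whose libc does
+ * not provide copy_file_range(2).
+ */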
+ssize_t
+copy_file_range(int, loff_t *, int, loff_t *, size_t, unsigned int)
+ __attribute__((weak));
+
+static void *
+mmap_file(int fd, size_t size)
+{
+ void *p;
+
+ p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED) {
+ (void) fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+ exit(2);
+ }
+
+ return (p);
+}
+
+static void
+usage(const char *progname)
+{
+
+ /*
+ * -i cache input before copy_file_range(2).
+	 * -o cache output before copy_file_range(2).
+ */
+ (void) fprintf(stderr, "usage: %s [-io] <input> <output>\n", progname);
+ exit(3);
+}
+
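+/*
+ * Example (hypothetical paths):
+ *
+ *	clone_mmap_cached -i /pool/src /pool/dst
+ *
+ * maps both files, reads the source through its map so the pages are
+ * cached, copies the source over the destination with
+ * copy_file_range(2), and exits 0 only if the two mappings then
+ * compare equal.
+ */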
+int
+main(int argc, char *argv[])
+{
+ int dfd, sfd;
+ size_t dsize, ssize;
+ void *dmem, *smem, *ptr;
+ off_t doff, soff;
+ struct stat sb;
+ bool cache_input, cache_output;
+ const char *progname;
+ int c;
+
+ progname = argv[0];
+ cache_input = cache_output = false;
+
+ while ((c = getopt(argc, argv, "io")) != -1) {
+ switch (c) {
+ case 'i':
+ cache_input = true;
+ break;
+ case 'o':
+ cache_output = true;
+ break;
+ default:
+ usage(progname);
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 2) {
+ usage(progname);
+ }
+
+	sfd = open(argv[0], O_RDONLY);
+	if (sfd == -1) {
+		(void) fprintf(stderr, "open(%s) failed: %s\n", argv[0],
+		    strerror(errno));
+		exit(2);
+	}
+ if (fstat(sfd, &sb) == -1) {
+ (void) fprintf(stderr, "fstat failed: %s\n", strerror(errno));
+ exit(2);
+ }
+ ssize = sb.st_size;
+ smem = mmap_file(sfd, ssize);
+
+	dfd = open(argv[1], O_RDWR);
+	if (dfd == -1) {
+		(void) fprintf(stderr, "open(%s) failed: %s\n", argv[1],
+		    strerror(errno));
+		exit(2);
+	}
+ if (fstat(dfd, &sb) == -1) {
+ (void) fprintf(stderr, "fstat failed: %s\n", strerror(errno));
+ exit(2);
+ }
+ dsize = sb.st_size;
+ dmem = mmap_file(dfd, dsize);
+
+	/*
+	 * Read the mapped file data into a scratch buffer so its pages
+	 * end up cached; hopefully the copies won't be compiled out.
+	 */
+ if (cache_input) {
+ ptr = malloc(ssize);
+ assert(ptr != NULL);
+ memcpy(ptr, smem, ssize);
+ free(ptr);
+ }
+ if (cache_output) {
+		ptr = malloc(dsize);
+ assert(ptr != NULL);
+ memcpy(ptr, dmem, dsize);
+ free(ptr);
+ }
+
+ soff = doff = 0;
+ if (copy_file_range(sfd, &soff, dfd, &doff, ssize, 0) < 0) {
+ (void) fprintf(stderr, "copy_file_range failed: %s\n",
+ strerror(errno));
+ exit(2);
+ }
+
+ exit(memcmp(smem, dmem, ssize) == 0 ? 0 : 1);
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_write.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_write.c
new file mode 100644
index 000000000000..6a5cd8721c57
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/clone_mmap_write.c
@@ -0,0 +1,123 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * This program clones a file, mmaps it, and writes from the map back
+ * into the file. This scenario triggers a panic on Linux in
+ * dbuf_redirty(), which is fixed under PR#15656. On FreeBSD, the same
+ * test causes data corruption, which is fixed by PR#15665.
+ *
+ * It would be good to test for this scenario in ZTS. This program and
+ * issue were initially produced by @robn.
+ */
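+
+/*
+ * Example (hypothetical paths):
+ *
+ *	clone_mmap_write /pool/fs/file /pool/fs/file.clone
+ *
+ * clones "file" into "file.clone", maps the clone, and then pwrite()s
+ * the first 128 KiB of that mapping back into the clone itself.
+ */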
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#ifdef __FreeBSD__
+#define loff_t off_t
+#endif
+
+ssize_t
+copy_file_range(int, loff_t *, int, loff_t *, size_t, unsigned int)
+ __attribute__((weak));
+
+static int
+open_file(const char *source)
+{
+ int fd;
+ if ((fd = open(source, O_RDWR | O_APPEND)) < 0) {
+ (void) fprintf(stderr, "Error opening %s\n", source);
+ exit(1);
+ }
+ sync();
+ return (fd);
+}
+
+static int
+clone_file(int sfd, long long size, const char *dest)
+{
+ int dfd;
+
+ if ((dfd = open(dest, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) {
+ (void) fprintf(stderr, "Error opening %s\n", dest);
+ exit(1);
+ }
+
+ if (copy_file_range(sfd, 0, dfd, 0, size, 0) < 0) {
+ (void) fprintf(stderr, "copy_file_range failed\n");
+ exit(1);
+ }
+
+ return (dfd);
+}
+
+static void *
+map_file(int fd, long long size)
+{
+ void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED) {
+ (void) fprintf(stderr, "mmap failed\n");
+ exit(1);
+ }
+
+ return (p);
+}
+
+static void
+map_write(void *p, int fd)
+{
+ if (pwrite(fd, p, 1024*128, 0) < 0) {
+ (void) fprintf(stderr, "write failed\n");
+ exit(1);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int sfd, dfd;
+ void *p;
+ struct stat sb;
+ if (argc != 3) {
+ (void) printf("usage: %s <input source file> "
+ "<clone destination file>\n", argv[0]);
+ exit(1);
+ }
+ sfd = open_file(argv[1]);
+ if (fstat(sfd, &sb) == -1) {
+ (void) fprintf(stderr, "fstat failed\n");
+ exit(1);
+ }
+ dfd = clone_file(sfd, sb.st_size, argv[2]);
+ p = map_file(dfd, sb.st_size);
+ map_write(p, dfd);
+ return (0);
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c
index 696dc471d8c3..bc30bb7798e9 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/clonefile.c
@@ -1,333 +1,373 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
 * This program tests the availability and behaviour of copy_file_range,
* FICLONE, FICLONERANGE and FIDEDUPERANGE in the Linux kernel. It should
* compile and run even if these features aren't exposed through the libc.
*/
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <stdlib.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#ifndef __NR_copy_file_range
#if defined(__x86_64__)
#define __NR_copy_file_range (326)
#elif defined(__i386__)
#define __NR_copy_file_range (377)
#elif defined(__s390__)
#define __NR_copy_file_range (375)
#elif defined(__arm__)
#define __NR_copy_file_range (391)
#elif defined(__aarch64__)
#define __NR_copy_file_range (285)
#elif defined(__powerpc__)
#define __NR_copy_file_range (379)
#else
#error "no definition of __NR_copy_file_range for this platform"
#endif
#endif /* __NR_copy_file_range */
+#ifdef __FreeBSD__
+#define loff_t off_t
+#endif
+
ssize_t
copy_file_range(int, loff_t *, int, loff_t *, size_t, unsigned int)
__attribute__((weak));
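/*
 * copy_file_range() is declared weak so this program links even where
 * libc does not provide it; if the symbol is absent at run time,
 * cf_copy_file_range() below falls back to the raw syscall.
 */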
static inline ssize_t
cf_copy_file_range(int sfd, loff_t *soff, int dfd, loff_t *doff,
size_t len, unsigned int flags)
{
if (copy_file_range)
return (copy_file_range(sfd, soff, dfd, doff, len, flags));
return (
syscall(__NR_copy_file_range, sfd, soff, dfd, doff, len, flags));
}
/* Define missing FICLONE */
#ifdef FICLONE
#define CF_FICLONE FICLONE
#else
#define CF_FICLONE _IOW(0x94, 9, int)
#endif
/* Define missing FICLONERANGE and support structs */
#ifdef FICLONERANGE
#define CF_FICLONERANGE FICLONERANGE
typedef struct file_clone_range cf_file_clone_range_t;
#else
typedef struct {
int64_t src_fd;
uint64_t src_offset;
uint64_t src_length;
uint64_t dest_offset;
} cf_file_clone_range_t;
#define CF_FICLONERANGE _IOW(0x94, 13, cf_file_clone_range_t)
#endif
/* Define missing FIDEDUPERANGE and support structs */
#ifdef FIDEDUPERANGE
#define CF_FIDEDUPERANGE FIDEDUPERANGE
#define CF_FILE_DEDUPE_RANGE_SAME FILE_DEDUPE_RANGE_SAME
#define CF_FILE_DEDUPE_RANGE_DIFFERS FILE_DEDUPE_RANGE_DIFFERS
typedef struct file_dedupe_range_info cf_file_dedupe_range_info_t;
typedef struct file_dedupe_range cf_file_dedupe_range_t;
#else
typedef struct {
int64_t dest_fd;
uint64_t dest_offset;
uint64_t bytes_deduped;
int32_t status;
uint32_t reserved;
} cf_file_dedupe_range_info_t;
typedef struct {
uint64_t src_offset;
uint64_t src_length;
uint16_t dest_count;
uint16_t reserved1;
uint32_t reserved2;
cf_file_dedupe_range_info_t info[0];
} cf_file_dedupe_range_t;
#define CF_FIDEDUPERANGE _IOWR(0x94, 54, cf_file_dedupe_range_t)
#define CF_FILE_DEDUPE_RANGE_SAME (0)
#define CF_FILE_DEDUPE_RANGE_DIFFERS (1)
#endif
typedef enum {
CF_MODE_NONE,
CF_MODE_CLONE,
CF_MODE_CLONERANGE,
CF_MODE_COPYFILERANGE,
CF_MODE_DEDUPERANGE,
} cf_mode_t;
static int
usage(void)
{
printf(
"usage:\n"
" FICLONE:\n"
" clonefile -c <src> <dst>\n"
" FICLONERANGE:\n"
" clonefile -r <src> <dst> <soff> <doff> <len>\n"
" copy_file_range:\n"
- " clonefile -f <src> <dst> <soff> <doff> <len>\n"
+ " clonefile -f <src> <dst> [<soff> <doff> <len | \"all\">]\n"
" FIDEDUPERANGE:\n"
" clonefile -d <src> <dst> <soff> <doff> <len>\n");
return (1);
}
int do_clone(int sfd, int dfd);
int do_clonerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
int do_copyfilerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
int do_deduperange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len);
int quiet = 0;
int
main(int argc, char **argv)
{
cf_mode_t mode = CF_MODE_NONE;
- char c;
+ int c;
while ((c = getopt(argc, argv, "crfdq")) != -1) {
switch (c) {
case 'c':
mode = CF_MODE_CLONE;
break;
case 'r':
mode = CF_MODE_CLONERANGE;
break;
case 'f':
mode = CF_MODE_COPYFILERANGE;
break;
case 'd':
mode = CF_MODE_DEDUPERANGE;
break;
case 'q':
quiet = 1;
break;
}
}
- if (mode == CF_MODE_NONE || (argc-optind) < 2 ||
- (mode != CF_MODE_CLONE && (argc-optind) < 5))
- return (usage());
+ switch (mode) {
+ case CF_MODE_NONE:
+ return (usage());
+ case CF_MODE_CLONE:
+ if ((argc-optind) != 2)
+ return (usage());
+ break;
+ case CF_MODE_CLONERANGE:
+ case CF_MODE_DEDUPERANGE:
+ if ((argc-optind) != 5)
+ return (usage());
+ break;
+ case CF_MODE_COPYFILERANGE:
+ if ((argc-optind) != 2 && (argc-optind) != 5)
+ return (usage());
+ break;
+ default:
+ abort();
+ }
loff_t soff = 0, doff = 0;
- size_t len = 0;
- if (mode != CF_MODE_CLONE) {
+ size_t len = SSIZE_MAX;
+ if ((argc-optind) == 5) {
soff = strtoull(argv[optind+2], NULL, 10);
if (soff == ULLONG_MAX) {
fprintf(stderr, "invalid source offset");
return (1);
}
doff = strtoull(argv[optind+3], NULL, 10);
if (doff == ULLONG_MAX) {
fprintf(stderr, "invalid dest offset");
return (1);
}
- len = strtoull(argv[optind+4], NULL, 10);
- if (len == ULLONG_MAX) {
- fprintf(stderr, "invalid length");
- return (1);
+ if (mode == CF_MODE_COPYFILERANGE &&
+ strcmp(argv[optind+4], "all") == 0) {
+ len = SSIZE_MAX;
+ } else {
+ len = strtoull(argv[optind+4], NULL, 10);
+ if (len == ULLONG_MAX) {
+ fprintf(stderr, "invalid length");
+ return (1);
+ }
}
}
int sfd = open(argv[optind], O_RDONLY);
if (sfd < 0) {
fprintf(stderr, "open: %s: %s\n",
argv[optind], strerror(errno));
return (1);
}
int dfd = open(argv[optind+1], O_WRONLY|O_CREAT,
S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
if (dfd < 0) {
fprintf(stderr, "open: %s: %s\n",
argv[optind+1], strerror(errno));
close(sfd);
return (1);
}
int err;
switch (mode) {
case CF_MODE_CLONE:
err = do_clone(sfd, dfd);
break;
case CF_MODE_CLONERANGE:
err = do_clonerange(sfd, dfd, soff, doff, len);
break;
case CF_MODE_COPYFILERANGE:
err = do_copyfilerange(sfd, dfd, soff, doff, len);
break;
case CF_MODE_DEDUPERANGE:
err = do_deduperange(sfd, dfd, soff, doff, len);
break;
default:
abort();
}
- off_t spos = lseek(sfd, 0, SEEK_CUR);
- off_t slen = lseek(sfd, 0, SEEK_END);
- off_t dpos = lseek(dfd, 0, SEEK_CUR);
- off_t dlen = lseek(dfd, 0, SEEK_END);
+ if (!quiet) {
+ off_t spos = lseek(sfd, 0, SEEK_CUR);
+ off_t slen = lseek(sfd, 0, SEEK_END);
+ off_t dpos = lseek(dfd, 0, SEEK_CUR);
+ off_t dlen = lseek(dfd, 0, SEEK_END);
- fprintf(stderr, "file offsets: src=%lu/%lu; dst=%lu/%lu\n", spos, slen,
- dpos, dlen);
+ fprintf(stderr, "file offsets: src=%lu/%lu; dst=%lu/%lu\n",
+ spos, slen, dpos, dlen);
+ }
close(dfd);
close(sfd);
return (err == 0 ? 0 : 1);
}
int
do_clone(int sfd, int dfd)
{
- fprintf(stderr, "using FICLONE\n");
+ if (!quiet)
+ fprintf(stderr, "using FICLONE\n");
int err = ioctl(dfd, CF_FICLONE, sfd);
if (err < 0) {
fprintf(stderr, "ioctl(FICLONE): %s\n", strerror(errno));
return (err);
}
return (0);
}
int
do_clonerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
{
- fprintf(stderr, "using FICLONERANGE\n");
+ if (!quiet)
+ fprintf(stderr, "using FICLONERANGE\n");
cf_file_clone_range_t fcr = {
.src_fd = sfd,
.src_offset = soff,
.src_length = len,
.dest_offset = doff,
};
int err = ioctl(dfd, CF_FICLONERANGE, &fcr);
if (err < 0) {
fprintf(stderr, "ioctl(FICLONERANGE): %s\n", strerror(errno));
return (err);
}
return (0);
}
int
do_copyfilerange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
{
- fprintf(stderr, "using copy_file_range\n");
+ if (!quiet)
+ fprintf(stderr, "using copy_file_range\n");
ssize_t copied = cf_copy_file_range(sfd, &soff, dfd, &doff, len, 0);
if (copied < 0) {
fprintf(stderr, "copy_file_range: %s\n", strerror(errno));
return (1);
}
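+	/*
+	 * "all" was requested, so the number of bytes we expect to have
+	 * copied is the current size of the source file.
+	 */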
+ if (len == SSIZE_MAX) {
+ struct stat sb;
+
+ if (fstat(sfd, &sb) < 0) {
+ fprintf(stderr, "fstat(sfd): %s\n", strerror(errno));
+ return (1);
+ }
+ len = sb.st_size;
+ }
if (copied != len) {
fprintf(stderr, "copy_file_range: copied less than requested: "
"requested=%lu; copied=%lu\n", len, copied);
return (1);
}
return (0);
}
int
do_deduperange(int sfd, int dfd, loff_t soff, loff_t doff, size_t len)
{
- fprintf(stderr, "using FIDEDUPERANGE\n");
+ if (!quiet)
+ fprintf(stderr, "using FIDEDUPERANGE\n");
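	/*
	 * A dedupe request is a fixed header followed by a variable-length
	 * array of per-destination info records; reserve room for one.
	 */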
char buf[sizeof (cf_file_dedupe_range_t)+
sizeof (cf_file_dedupe_range_info_t)] = {0};
cf_file_dedupe_range_t *fdr = (cf_file_dedupe_range_t *)&buf[0];
cf_file_dedupe_range_info_t *fdri =
(cf_file_dedupe_range_info_t *)
&buf[sizeof (cf_file_dedupe_range_t)];
fdr->src_offset = soff;
fdr->src_length = len;
fdr->dest_count = 1;
fdri->dest_fd = dfd;
fdri->dest_offset = doff;
int err = ioctl(sfd, CF_FIDEDUPERANGE, fdr);
if (err != 0)
fprintf(stderr, "ioctl(FIDEDUPERANGE): %s\n", strerror(errno));
if (fdri->status < 0) {
fprintf(stderr, "dedup failed: %s\n", strerror(-fdri->status));
err = -1;
} else if (fdri->status == CF_FILE_DEDUPE_RANGE_DIFFERS) {
fprintf(stderr, "dedup failed: range differs\n");
err = -1;
}
return (err);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/ctime.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/ctime.c
index 0f5d81aea613..5ff1cea8a869 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/ctime.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/ctime.c
@@ -1,376 +1,384 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/stat.h>
#ifndef __FreeBSD__
#include <sys/xattr.h>
#endif
#include <utime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <string.h>
#define ST_ATIME 0
#define ST_CTIME 1
#define ST_MTIME 2
#define ALL_MODE (mode_t)(S_IRWXU|S_IRWXG|S_IRWXO)
typedef struct timetest {
int type;
const char *name;
int (*func)(const char *pfile);
} timetest_t;
static char tfile[BUFSIZ] = { 0 };
/*
* DESCRIPTION:
* Verify time will be changed correctly after each operation.
*
* STRATEGY:
* 1. Define time test array.
* 2. Loop through each item in this array.
* 3. Verify the time is changed after each operation.
*
*/
static int
get_file_time(const char *pfile, int what, time_t *ptr)
{
struct stat stat_buf;
if (pfile == NULL || ptr == NULL) {
return (-1);
}
if (stat(pfile, &stat_buf) == -1) {
return (-1);
}
switch (what) {
case ST_ATIME:
*ptr = stat_buf.st_atime;
return (0);
case ST_CTIME:
*ptr = stat_buf.st_ctime;
return (0);
case ST_MTIME:
*ptr = stat_buf.st_mtime;
return (0);
default:
return (-1);
}
}
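/* Length of path's directory prefix, or -1 if path contains no '/'. */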
static ssize_t
get_dirnamelen(const char *path)
{
const char *end = strrchr(path, '/');
return (end ? end - path : -1);
}
static int
do_read(const char *pfile)
{
int fd, ret = 0;
char buf[BUFSIZ] = { 0 };
if (pfile == NULL) {
return (-1);
}
if ((fd = open(pfile, O_RDONLY, ALL_MODE)) == -1) {
return (-1);
}
if (read(fd, buf, sizeof (buf)) == -1) {
(void) fprintf(stderr, "read(%d, buf, %zd) failed with errno "
"%d\n", fd, sizeof (buf), errno);
(void) close(fd);
return (1);
}
(void) close(fd);
return (ret);
}
static int
do_write(const char *pfile)
{
int fd, ret = 0;
char buf[BUFSIZ] = "call function do_write()";
if (pfile == NULL) {
return (-1);
}
if ((fd = open(pfile, O_WRONLY, ALL_MODE)) == -1) {
return (-1);
}
if (write(fd, buf, strlen(buf)) == -1) {
(void) fprintf(stderr, "write(%d, buf, %d) failed with errno "
"%d\n", fd, (int)strlen(buf), errno);
(void) close(fd);
return (1);
}
(void) close(fd);
return (ret);
}
static int
do_link(const char *pfile)
{
int ret = 0;
char link_file[BUFSIZ + 16] = { 0 };
if (pfile == NULL) {
return (-1);
}
/*
* Figure out source file directory name, and create
* the link file in the same directory.
*/
(void) snprintf(link_file, sizeof (link_file),
"%.*s/%s", (int)get_dirnamelen(pfile), pfile, "link_file");
if (link(pfile, link_file) == -1) {
(void) fprintf(stderr, "link(%s, %s) failed with errno %d\n",
pfile, link_file, errno);
return (1);
}
(void) unlink(link_file);
return (ret);
}
static int
do_creat(const char *pfile)
{
int fd, ret = 0;
if (pfile == NULL) {
return (-1);
}
if ((fd = creat(pfile, ALL_MODE)) == -1) {
(void) fprintf(stderr, "creat(%s, ALL_MODE) failed with errno "
"%d\n", pfile, errno);
return (1);
}
(void) close(fd);
return (ret);
}
static int
do_utime(const char *pfile)
{
int ret = 0;
if (pfile == NULL) {
return (-1);
}
/*
* Times of the file are set to the current time
*/
if (utime(pfile, NULL) == -1) {
(void) fprintf(stderr, "utime(%s, NULL) failed with errno "
"%d\n", pfile, errno);
return (1);
}
return (ret);
}
static int
do_chmod(const char *pfile)
{
int ret = 0;
if (pfile == NULL) {
return (-1);
}
if (chmod(pfile, ALL_MODE) == -1) {
(void) fprintf(stderr, "chmod(%s, ALL_MODE) failed with "
"errno %d\n", pfile, errno);
return (1);
}
return (ret);
}
static int
do_chown(const char *pfile)
{
int ret = 0;
if (pfile == NULL) {
return (-1);
}
if (chown(pfile, getuid(), getgid()) == -1) {
(void) fprintf(stderr, "chown(%s, %d, %d) failed with errno "
"%d\n", pfile, (int)getuid(), (int)getgid(), errno);
return (1);
}
return (ret);
}
#ifndef __FreeBSD__
static int
do_xattr(const char *pfile)
{
int ret = 0;
const char *value = "user.value";
if (pfile == NULL) {
return (-1);
}
if (setxattr(pfile, "user.x", value, strlen(value), 0) == -1) {
(void) fprintf(stderr, "setxattr(%s, %d, %d) failed with errno "
"%d\n", pfile, (int)getuid(), (int)getgid(), errno);
return (1);
}
return (ret);
}
#endif
static void
cleanup(void)
{
if ((strlen(tfile) != 0) && (access(tfile, F_OK) == 0)) {
(void) unlink(tfile);
}
}
static timetest_t timetest_table[] = {
{ ST_ATIME, "st_atime", do_read },
{ ST_ATIME, "st_atime", do_utime },
{ ST_MTIME, "st_mtime", do_creat },
{ ST_MTIME, "st_mtime", do_write },
{ ST_MTIME, "st_mtime", do_utime },
{ ST_CTIME, "st_ctime", do_creat },
{ ST_CTIME, "st_ctime", do_write },
{ ST_CTIME, "st_ctime", do_chmod },
{ ST_CTIME, "st_ctime", do_chown },
{ ST_CTIME, "st_ctime", do_link },
{ ST_CTIME, "st_ctime", do_utime },
#ifndef __FreeBSD__
{ ST_CTIME, "st_ctime", do_xattr },
#endif
};
#define NCOMMAND (sizeof (timetest_table) / sizeof (timetest_table[0]))
int
main(void)
{
int i, ret, fd;
const char *penv[] = {"TESTDIR", "TESTFILE0"};
(void) atexit(cleanup);
/*
* Get the environment variable values.
*/
for (i = 0; i < sizeof (penv) / sizeof (char *); i++) {
if ((penv[i] = getenv(penv[i])) == NULL) {
(void) fprintf(stderr, "getenv(penv[%d])\n", i);
return (1);
}
}
(void) snprintf(tfile, sizeof (tfile), "%s/%s", penv[0], penv[1]);
/*
* If the test file exists, remove it first.
*/
if (access(tfile, F_OK) == 0) {
(void) unlink(tfile);
}
if ((fd = open(tfile, O_WRONLY | O_CREAT | O_TRUNC, ALL_MODE)) == -1) {
(void) fprintf(stderr, "open(%s) failed: %d\n", tfile, errno);
return (1);
}
(void) close(fd);
for (i = 0; i < NCOMMAND; i++) {
time_t t1, t2;
/*
* Get original time before operating.
*/
ret = get_file_time(tfile, timetest_table[i].type, &t1);
if (ret != 0) {
(void) fprintf(stderr, "get_file_time(%s %d) = %d\n",
tfile, timetest_table[i].type, ret);
return (1);
}
/*
* Sleep 2 seconds, then invoke command on given file
*/
(void) sleep(2);
timetest_table[i].func(tfile);
/*
* Get time after operating.
*/
ret = get_file_time(tfile, timetest_table[i].type, &t2);
if (ret != 0) {
(void) fprintf(stderr, "get_file_time(%s %d) = %d\n",
tfile, timetest_table[i].type, ret);
return (1);
}
- if (t1 == t2) {
- (void) fprintf(stderr, "%s: t1(%ld) == t2(%ld)\n",
+
+ /*
+ * Ideally, time change would be exactly two seconds, but allow
+ * a little slack in case of scheduling delays or similar.
+ */
+ long delta = (long)t2 - (long)t1;
+ if (delta < 2 || delta > 4) {
+ (void) fprintf(stderr,
+ "%s: BAD time change: t1(%ld), t2(%ld)\n",
timetest_table[i].name, (long)t1, (long)t2);
return (1);
} else {
- (void) fprintf(stderr, "%s: t1(%ld) != t2(%ld)\n",
+ (void) fprintf(stderr,
+ "%s: good time change: t1(%ld), t2(%ld)\n",
timetest_table[i].name, (long)t1, (long)t2);
}
}
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
index 648f2203dfba..daa794551682 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
@@ -1,234 +1,237 @@
#
# Copyright (c) 2016, 2019 by Delphix. All rights reserved.
# These variables are used by zfs-tests.sh to constrain which utilities
# may be used by the suite. The suite will create a directory which is
# the only element of $PATH and create symlinks from that dir to the
# binaries listed below.
#
# Please keep the contents of each variable sorted for ease of reading
# and maintenance.
#
export SYSTEM_FILES_COMMON='awk
basename
bc
bunzip2
bzcat
cat
chgrp
chmod
chown
cksum
cmp
cp
cpio
cut
date
dd
df
diff
dirname
dmesg
du
echo
env
expr
false
file
find
fio
getconf
getent
getfacl
grep
gunzip
gzip
head
hostname
id
iostat
kill
ksh
ldd
ln
ls
mkdir
mknod
mkfifo
mktemp
mount
mv
net
od
openssl
pamtester
pax
pgrep
ping
pkill
printf
ps
python3
readlink
rm
rmdir
rsync
scp
script
sed
seq
setfacl
sh
sleep
sort
ssh
stat
strings
sudo
swapoff
swapon
sync
tail
tar
timeout
touch
tr
true
truncate
umount
uname
uniq
vmstat
- wc'
+ wc
+ xargs'
export SYSTEM_FILES_FREEBSD='chflags
compress
diskinfo
fsck
getextattr
gpart
jail
jexec
jls
lsextattr
md5
mdconfig
newfs
pw
rmextattr
setextattr
sha256
showmount
swapctl
sysctl
trim
uncompress'
export SYSTEM_FILES_LINUX='attr
blkid
blkdiscard
blockdev
chattr
exportfs
fallocate
flock
free
getfattr
groupadd
groupdel
groupmod
hostid
logger
losetup
lsattr
lsblk
lscpu
lsmod
lsscsi
md5sum
mkswap
modprobe
mountpoint
mpstat
nsenter
parted
perf
setfattr
setpriv
sha256sum
udevadm
unshare
useradd
userdel
usermod
wipefs'
export ZFS_FILES='zdb
zfs
zhack
zinject
zpool
ztest
raidz_test
arc_summary
arcstat
zilstat
dbufstat
mount.zfs
zed
zgenhostid
zstream
zfs_ids_to_path
zpool_influxdb'
export ZFSTEST_FILES='badsend
btree_test
chg_usr_exec
clonefile
+ clone_mmap_cached
+ clone_mmap_write
devname2devid
dir_rd_update
draid
file_fadvise
file_append
file_check
file_trunc
file_write
get_diff
getversion
largest_file
libzfs_input_check
mkbusy
mkfile
mkfiles
mktree
mmap_exec
mmap_libaio
mmap_seek
mmap_sync
mmapwrite
nvlist_to_lua
randfree_file
randwritecomp
readmmap
read_dos_attributes
renameat2
rename_dir
rm_lnkcnt_zero_file
send_doall
threadsappend
user_ns_exec
write_dos_attributes
xattrtest
stride_dd
zed_fd_spill-zedlet
suid_write_to_file
cp_files
blake3_test
edonr_test
skein_test
sha2_test
ctime
truncate_test
ereports
zfs_diff-socket
dosmode_readonly_write
idmap_util'
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
index b4d2b91dd476..dfab48d2cdaf 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
@@ -1,3896 +1,3909 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
# Copyright (c) 2012, 2020, Delphix. All rights reserved.
# Copyright (c) 2017, Tim Chase. All rights reserved.
# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
# Copyright (c) 2017, Lawrence Livermore National Security LLC.
# Copyright (c) 2017, Datto Inc. All rights reserved.
# Copyright (c) 2017, Open-E Inc. All rights reserved.
# Copyright (c) 2021, The FreeBSD Foundation.
# Use is subject to license terms.
#
. ${STF_SUITE}/include/tunables.cfg
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
# On AlmaLinux 9 we will see $PWD = '.' instead of the full path. This causes
# some tests to fail. Fix it up here.
if [ "$PWD" = "." ] ; then
PWD="$(readlink -f $PWD)"
fi
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
export PATH="$STF_PATH"
fi
#
# Generic dot version comparison function
#
# Returns success when version $1 is greater than or equal to $2.
#
function compare_version_gte
{
[ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
}
-# Linux kernel version comparison function
-#
-# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
-#
-# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
-#
-function linux_version
+# Helper function used by linux_version() and freebsd_version()
+function kernel_version
{
typeset ver="$1"
[ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
typeset version major minor _
IFS='.' read -r version major minor _ <<<"$ver"
[ -z "$version" ] && version=0
[ -z "$major" ] && major=0
[ -z "$minor" ] && minor=0
echo $((version * 100000 + major * 1000 + minor))
}
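#
# An illustrative sketch (not part of the original suite): the three
# version components are packed into one comparable integer, e.g.
# kernel_version "5.15.4" yields 515004 (5*100000 + 15*1000 + 4)
# kernel_version "2.6.32" yields 206032 (2*100000 + 6*1000 + 32)
#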
+# Linux kernel version comparison function
+#
+# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
+#
+# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
+function linux_version
+{
+ kernel_version "$1"
+}
+
+# FreeBSD version comparison function
+#
+# $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
+#
+# Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
+function freebsd_version
+{
+ kernel_version "$1"
+}
+
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
function is_linux
{
[ "$UNAME" = "Linux" ]
}
# Determine if this is an illumos test system
#
# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
[ "$UNAME" = "illumos" ]
}
# Determine if this is a FreeBSD test system
#
# Return 0 if platform FreeBSD, 1 if otherwise
function is_freebsd
{
[ "$UNAME" = "FreeBSD" ]
}
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
function is_32bit
{
[ $(getconf LONG_BIT) = "32" ]
}
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
function is_kmemleak
{
is_linux && [ -e /sys/kernel/debug/kmemleak ]
}
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
typeset fstype=$2
[[ -z $fstype ]] && fstype=zfs
typeset out dir name
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
else
! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
fi
;;
ufs|nfs)
if is_freebsd; then
mount -pt $fstype | while read dev dir _t _flags; do
[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
done
else
out=$(df -F $fstype $1 2>/dev/null) || return
dir=${out%%\(*}
dir=${dir%% *}
name=${out##*\(}
name=${name%%\)*}
name=${name%% *}
[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
fi
;;
ext*)
df -t $fstype $1 > /dev/null 2>&1
;;
zvol)
if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
link=$(readlink -f $ZVOL_DEVDIR/$1)
[[ -n "$link" ]] && \
mount | grep -q "^$link" && \
return 0
fi
;;
*)
false
;;
esac
}
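#
# Usage sketch for ismounted() (the dataset and path names below are
# hypothetical examples):
# ismounted tank/home && echo "dataset is mounted"
# ismounted /mnt/legacy nfs || echo "NFS path is not mounted"
#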
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
ismounted $1 $2
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
! ismounted $1 $2
}
function default_setup
{
default_setup_noexit "$@"
log_pass
}
function default_setup_no_mountpoint
{
default_setup_noexit "$1" "$2" "$3" "yes"
log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
typeset disklist=$1
typeset container=$2
typeset volume=$3
typeset no_mountpoint=$4
log_note begin default_setup_noexit
if is_global_zone; then
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
fi
if [[ -n $container ]]; then
rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
log_must zfs create $TESTPOOL/$TESTCTR
log_must zfs set canmount=off $TESTPOOL/$TESTCTR
log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
block_device_wait
else
log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
typeset disklist=$1
default_setup "$disklist" "true"
}
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "" "true"
}
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "true" "true"
}
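#
# Usage sketch for the default_* setup helpers above (disk names are
# hypothetical):
# default_setup "sdb sdc" creates a pool and file system
# default_container_setup "sdb sdc" additionally creates a container
# default_volume_setup "sdb sdc" additionally creates a volume
#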
#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
typeset snap=${2:-$TESTSNAP}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
if snapexists $fs_vol@$snap; then
log_fail "$fs_vol@$snap already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
typeset clone=${2:-$TESTPOOL/$TESTCLONE}
[[ -z $snap ]] && \
log_fail "Snapshot name is undefined."
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
typeset fs_vol=${1:-$TESTFS}
typeset snap=${2:-$TESTSNAP}
typeset bkmark=${3:-$TESTBKMARK}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
if bkmarkexists $fs_vol#$bkmark; then
log_fail "$fs_vol#$bkmark already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
snapexists $fs_vol@$snap || \
log_fail "$fs_vol@$snap must exist."
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
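#
# Example flow combining the helpers above (names are the suite's usual
# environment variables):
# create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
# create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE
# create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK
#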
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
# of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
typeset recvfs="$1"
typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
typeset snap="$sendfs@snap1"
typeset incr="$sendfs@snap2"
typeset mountpoint="$TESTDIR/create_recv_clone"
typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
datasetexists $recvfs && log_fail "Recv filesystem must not exist."
datasetexists $sendfs && log_fail "Send filesystem must not exist."
log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
log_must zfs snapshot $snap
log_must eval "zfs send $snap | zfs recv -u $recvfs"
log_must mkfile 1m "$mountpoint/data"
log_must zfs snapshot $incr
log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
iflag=fullblock > $sendfile"
log_mustnot eval "zfs recv -su $recvfs < $sendfile"
destroy_dataset "$sendfs" "-r"
log_must rm -f "$sendfile"
if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
log_fail "Error creating temporary $recvfs/%recv clone"
fi
}
function default_mirror_setup
{
default_mirror_setup_noexit $1 $2 $3
log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
# $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
readonly func="default_mirror_setup_noexit"
typeset primary=$1
typeset secondary=$2
[[ -z $primary ]] && \
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL mirror $@
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
default_cleanup_noexit
log_pass
}
function default_raidz_setup
{
default_raidz_setup_noexit "$*"
log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup_noexit
{
typeset disklist="$*"
disks=(${disklist[*]})
if [[ ${#disks[*]} -lt 2 ]]; then
log_fail "A raid-z requires a minimum of two disks."
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
function default_cleanup
{
default_cleanup_noexit
log_pass
}
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
function default_cleanup_noexit
{
typeset pool=""
#
# Destroying the pool will also destroy any
# filesystems it contains.
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
while [ -n "${ALL_POOLS}" ]
do
for pool in ${ALL_POOLS}
do
if safe_to_destroy_pool $pool ;
then
destroy_pool $pool
fi
done
ALL_POOLS=$(get_all_pools)
done
zfs mount -a
else
typeset fs=""
for fs in $(zfs list -H -o name \
| grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
destroy_dataset "$fs" "-Rf"
done
# Clean up here to avoid leaving garbage directories behind.
for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
[[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
log_must zfs set reservation=none $fs
log_must zfs set recordsize=128K $fs
log_must zfs set mountpoint=/$fs $fs
typeset enc=$(get_prop encryption $fs)
if [ -z "$enc" ] || [ "$enc" = "off" ]; then
log_must zfs set checksum=on $fs
fi
log_must zfs set compression=off $fs
log_must zfs set atime=on $fs
log_must zfs set devices=off $fs
log_must zfs set exec=on $fs
log_must zfs set setuid=on $fs
log_must zfs set readonly=off $fs
log_must zfs set snapdir=hidden $fs
log_must zfs set aclmode=groupmask $fs
log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
log_must rm -rf $TESTDIR
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
rm -f $TEST_BASE_DIR/{err,out}
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
if ! is_global_zone; then
reexport_pool
fi
ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
log_must rm -rf $TESTDIR1
default_cleanup
}
#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if ! snapexists $snap; then
log_fail "'$snap' does not exist."
fi
#
# The value returned by 'get_prop' does not match the real mountpoint
# when the snapshot is unmounted, so first check whether this snapshot
# is mounted on the current system.
#
typeset mtpt=""
if ismounted $snap; then
mtpt=$(get_prop mountpoint $snap)
fi
destroy_dataset "$snap"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
typeset clone=${1:-$TESTPOOL/$TESTCLONE}
if ! datasetexists $clone; then
log_fail "'$clone' does not existed."
fi
# With the same reason in destroy_snapshot
typeset mtpt=""
if ismounted $clone; then
mtpt=$(get_prop mountpoint $clone)
fi
destroy_dataset "$clone"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to clean up a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
if ! bkmarkexists $bkmark; then
log_fail "'$bkmarkp' does not existed."
fi
destroy_dataset "$bkmark"
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
function snapexists
{
zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
#
# Return 0 if a hold exists; $? otherwise
#
# $1 - hold tag
# $2 - snapshot name
#
function holdexists
{
! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
# $1 dataset whose property is being set
# $2 property to set
# $3 value to set property to
# @return:
# 0 if the property could be set.
# non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
typeset fn=dataset_setprop
if (($# < 3)); then
log_note "$fn: Insufficient parameters (need 3, had $#)"
return 1
fi
typeset output=
output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
log_note "property $2=$3"
log_note "Return Code: $rv"
log_note "Output: $output"
return $rv
fi
return 0
}
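#
# Usage sketch (the property and value are illustrative):
# dataset_setprop $TESTPOOL/$TESTFS compression lz4 || log_fail "setprop"
#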
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
(($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
typeset diskname=$1
typeset i
if is_freebsd; then
gpart destroy -F $diskname
elif is_linux; then
DSK=$DEV_DSKDIR/$diskname
DSK=$(echo $DSK | sed -e "s|//|/|g")
log_must parted $DSK -s -- mklabel gpt
blockdev --rereadpt $DSK 2>/dev/null
block_device_wait
else
for i in 0 1 3 4 5 6 7
do
log_must set_partition $i "" 0mb $diskname
done
fi
return 0
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
function set_partition
{
typeset -i slicenum=$1
typeset start=$2
typeset size=$3
typeset disk=${4#$DEV_DSKDIR/}
disk=${disk#$DEV_RDSKDIR/}
case "$UNAME" in
Linux)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
typeset size_mb=${size%%[mMgG]}
size_mb=${size_mb%%[mMgG][bB]}
if [[ ${size:1:1} == 'g' ]]; then
((size_mb = size_mb * 1024))
fi
# Create GPT partition table when setting slice 0 or
# when the device doesn't already contain a GPT label.
parted $disk -s -- print 1 >/dev/null
typeset ret_val=$?
if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
if ! parted $disk -s -- mklabel gpt; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
# When no start is given align on the first cylinder.
if [[ -z "$start" ]]; then
start=1
fi
# Determine the cylinder size for the device and using
# that calculate the end offset in cylinders.
typeset -i cly_size_kb=0
cly_size_kb=$(parted -m $disk -s -- unit cyl print |
awk -F '[:k.]' 'NR == 3 {print $4}')
((end = (size_mb * 1024 / cly_size_kb) + start))
parted $disk -s -- \
mkpart part$slicenum ${start}cyl ${end}cyl
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
blockdev --rereadpt $disk 2>/dev/null
block_device_wait $disk
;;
FreeBSD)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
gpart destroy -F $disk >/dev/null 2>&1
if ! gpart create -s GPT $disk; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
typeset index=$((slicenum + 1))
if [[ -n $start ]]; then
start="-b $start"
fi
gpart add -t freebsd-zfs $start -s $size -i $index $disk
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
block_device_wait $disk
;;
*)
if [[ -z $slicenum || -z $size || -z $disk ]]; then
log_fail "The slice, size or disk name is unspecified."
fi
typeset format_file=/var/tmp/format_in.$$
echo "partition" >$format_file
echo "$slicenum" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "$start" >> $format_file
echo "$size" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
typeset ret_val=$?
rm -f $format_file
;;
esac
if [[ $ret_val -ne 0 ]]; then
log_note "Unable to format $disk slice $slicenum to $size"
return 1
fi
return 0
}
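#
# Usage sketch (disk name hypothetical): create slice 0 of 100mb at the
# default start, then slice 1 immediately after it:
# log_must set_partition 0 "" 100mb sdb
# cyl=$(get_endslice sdb 0)
# log_must set_partition 1 "$cyl" 100mb sdb
#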
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
typeset disk
if [[ -z $DISKSARRAY ]]; then
DISKSARRAY=$DISKS
fi
if is_linux; then
typeset -i part
for disk in $DISKSARRAY; do
for (( part = 1; part < MAX_PARTITIONS; part++ )); do
typeset partition=${disk}${SLICE_PREFIX}${part}
parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
if lsblk | grep -qF ${partition}; then
log_fail "Partition ${partition} not deleted"
else
log_note "Partition ${partition} deleted"
fi
done
done
elif is_freebsd; then
for disk in $DISKSARRAY; do
if gpart destroy -F $disk; then
log_note "Partitions for ${disk} deleted"
else
log_fail "Partitions for ${disk} not deleted"
fi
done
fi
}
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
typeset disk=$1
typeset slice=$2
if [[ -z $disk || -z $slice ]] ; then
log_fail "The disk name or slice number is unspecified."
fi
case "$UNAME" in
Linux)
endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
((endcyl = (endcyl + 1)))
;;
FreeBSD)
disk=${disk#/dev/zvol/}
disk=${disk%p*}
slice=$((slice + 1))
endcyl=$(gpart show $disk | \
awk -v slice=$slice '$3 == slice { print $1 + $2 }')
;;
*)
disk=${disk#/dev/dsk/}
disk=${disk#/dev/rdsk/}
disk=${disk%s*}
typeset -i ratio=0
ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
awk '/sectors\/cylinder/ {print $2}')
if ((ratio == 0)); then
return
fi
typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
awk -v token="$slice" '$1 == token {print $6}')
((endcyl = (endcyl + 1) / ratio))
;;
esac
echo $endcyl
}
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
typeset -i i=0
typeset slice_size=$1
typeset disk_name=$2
typeset total_slices=$3
typeset cyl
zero_partitions $disk_name
while ((i < $total_slices)); do
if ! is_linux; then
if ((i == 2)); then
((i = i + 1))
continue
fi
fi
log_must set_partition $i "$cyl" $slice_size $disk_name
cyl=$(get_endslice $disk_name $i)
((i = i+1))
done
}
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
# non 0 on error
#
# Where :
# destdir: is the directory where everything is to be created under
# dirnum: the maximum number of subdirectories to use, -1 no limit
# filenum: the maximum number of files per subdirectory
# bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
typeset destdir=${1:-$TESTDIR}
typeset -i dirnum=${2:-50}
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
typeset data=${6:-0}
mkdir -p $destdir/{1..$dirnum}
for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
file_write -o create -f $f -b $bytes -c $num_writes -d $data \
|| return
done
}
# Get the specified dataset property in parsable format or fail
function get_prop # property dataset
{
typeset prop=$1
typeset dataset=$2
zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
}
# Get the specified pool property in parsable format or fail
function get_pool_prop # property pool
{
typeset prop=$1
typeset pool=$2
zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
function poolexists
{
typeset pool=$1
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
zpool get name "$pool" > /dev/null 2>&1
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
zfs get name "$@" > /dev/null 2>&1
}
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
return 0
}
# FreeBSD breaks exports(5) at whitespace and doesn't process escapes
# Solaris just breaks
#
# cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
#
# Linux can have spaces (which are \OOO-escaped),
# but can't have backslashes because they're parsed recursively
function shares_can_have_whitespace
{
is_linux
}
function is_shared_freebsd
{
typeset fs=$1
pgrep -q mountd && showmount -E | grep -qx "$fs"
}
function is_shared_illumos
{
typeset fs=$1
typeset mtpt
for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
return 1
}
function is_shared_linux
{
typeset fs=$1
! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case "$mtpt" in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case "$UNAME" in
FreeBSD) is_shared_freebsd "$fs" ;;
Linux) is_shared_linux "$fs" ;;
*) is_shared_illumos "$fs" ;;
esac
}
function is_exported_illumos
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$mtpt" = "$fs" ] && return
done < /etc/dfs/sharetab
return 1
}
function is_exported_freebsd
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$mtpt" = "$fs" ] && return
done < /etc/zfs/exports
return 1
}
function is_exported_linux
{
typeset fs=$1
typeset mtpt _
while read -r mtpt _; do
[ "$(printf "$mtpt")" = "$fs" ] && return
done < /etc/exports.d/zfs.exports
return 1
}
#
# Given a mountpoint, or a dataset name, determine if it is exported via
# the os-specific NFS exports file.
#
# Returns 0 if exported, 1 otherwise.
#
function is_exported
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case "$UNAME" in
FreeBSD) is_exported_freebsd "$fs" ;;
Linux) is_exported_linux "$fs" ;;
*) is_exported_illumos "$fs" ;;
esac
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
typeset fs=$1
datasetexists "$fs" || return
if is_linux; then
net usershare list | grep -xFq "${fs//[-\/]/_}"
else
log_note "SMB on $UNAME currently unsupported by the test framework"
return 1
fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
! is_shared $1
}
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
! is_shared_smb $1
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
typeset fs=$1
if is_shared $fs || is_shared_smb $fs; then
log_must zfs unshare $fs
fi
}
#
# Helper function to share an NFS mountpoint.
#
function share_nfs #fs
{
typeset fs=$1
is_shared "$fs" && return
case "$UNAME" in
Linux)
log_must exportfs "*:$fs"
;;
FreeBSD)
typeset mountd
read -r mountd < /var/run/mountd.pid
log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
log_must kill -s HUP "$mountd"
;;
*)
log_must share -F nfs "$fs"
;;
esac
return 0
}
#
# Helper function to unshare an NFS mountpoint.
#
function unshare_nfs #fs
{
typeset fs=$1
! is_shared "$fs" && return
case "$UNAME" in
Linux)
log_must exportfs -u "*:$fs"
;;
FreeBSD)
typeset mountd
read -r mountd < /var/run/mountd.pid
awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
log_must kill -s HUP "$mountd"
;;
*)
log_must unshare -F nfs $fs
;;
esac
return 0
}
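#
# Usage sketch (mountpoint hypothetical):
# share_nfs /testpool/testfs
# is_shared /testpool/testfs && log_note "shared via NFS"
# unshare_nfs /testpool/testfs
#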
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
case "$UNAME" in
Linux)
exportfs -v
;;
FreeBSD)
showmount
;;
*)
share -F nfs
;;
esac
}
function check_nfs
{
case "$UNAME" in
Linux)
exportfs -s
;;
FreeBSD)
showmount -e
;;
*)
log_unsupported "Unknown platform"
;;
esac || log_unsupported "The NFS utilities are not installed"
}
#
# Check NFS server status and bring it online if needed.
#
function setup_nfs_server
{
# Cannot share directory in non-global zone.
#
if ! is_global_zone; then
log_note "Cannot trigger NFS server by sharing in LZ."
return
fi
if is_linux; then
#
# Re-synchronize /var/lib/nfs/etab with /etc/exports and
# /etc/exports.d./* to provide a clean test environment.
#
log_must exportfs -r
log_note "NFS server must be started prior to running ZTS."
return
elif is_freebsd; then
log_must kill -s HUP $(</var/run/mountd.pid)
log_note "NFS server must be started prior to running ZTS."
return
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only a real sharing operation can bring the NFS server
# online permanently.
#
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
log_must rm -rf $dummy
fi
log_must mkdir $dummy
log_must share $dummy
#
# Wait for the FMRI's status to settle. While in transition,
# an asterisk (*) is appended to the instance status, and
# unsharing would flip the status back to 'DIS'.
#
# Wait for at least 1 second.
#
log_must sleep 1
timeout=10
while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
log_must sleep 1
((timeout -= 1))
done
log_must unshare $dummy
log_must rm -rf $dummy
fi
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# Verify whether the calling process is in the global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
if is_linux || is_freebsd; then
return 0
else
typeset cur_zone=$(zonename 2>/dev/null)
[ $cur_zone = "global" ]
fi
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
typeset limit=$1
[[ -z $limit ]] && return 0
if is_global_zone ; then
case $limit in
global|both)
;;
local) log_unsupported "Test is unable to run from "\
"global zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
else
case $limit in
local|both)
;;
global) log_unsupported "Test is unable to run from "\
"local zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
reexport_pool
fi
return 0
}
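#
# Usage sketch: a test that only makes sense in the global zone would
# start with
# verify_runnable "global"
#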
# Return 0 if the pool is created successfully or already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
typeset pool=${1%%/*}
shift
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi
if poolexists $pool ; then
destroy_pool $pool
fi
if is_global_zone ; then
[[ -d /$pool ]] && rm -rf /$pool
log_must zpool create -f $pool $@
fi
return 0
}
# Return 0 if the pool is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
function destroy_pool #pool
{
typeset pool=${1%%/*}
typeset mtpt
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
if is_global_zone ; then
if poolexists "$pool" ; then
mtpt=$(get_prop mountpoint "$pool")
# At times, syseventd/udev activity can cause attempts
# to destroy a pool to fail with EBUSY. We retry a few
# times allowing failures before requiring the destroy
# to succeed.
log_must_busy zpool destroy -f $pool
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
fi
fi
return 0
}
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
typeset dataset=$1
shift
if [[ -z $dataset ]]; then
log_note "Missing dataset name."
return 1
fi
if datasetexists $dataset ; then
destroy_dataset $dataset
fi
log_must zfs create $@ $dataset
return 0
}
# Return 0 if the dataset is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset # dataset [args]
{
typeset dataset=$1
typeset mtpt
typeset args=${2:-""}
if [[ -z $dataset ]]; then
log_note "No dataset name given."
return 1
fi
if is_global_zone ; then
if datasetexists "$dataset" ; then
mtpt=$(get_prop mountpoint "$dataset")
log_must_busy zfs destroy $args $dataset
[ -d $mtpt ] && log_must rm -rf $mtpt
else
log_note "Dataset does not exist. ($dataset)"
return 1
fi
fi
return 0
}
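#
# Typical pairing of the create/destroy helpers above (vdev names are
# hypothetical):
# create_pool $TESTPOOL mirror sdb sdc
# create_dataset $TESTPOOL/$TESTFS -o compression=on
# destroy_dataset $TESTPOOL/$TESTFS "-r"
# destroy_pool $TESTPOOL
#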
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
typeset -i cntctr=5
typeset -i i=0
while ((i < cntctr)); do
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
done
}
#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
[[ -z $pool ]] || [[ -z $state ]] \
&& log_fail "Arguments invalid or missing"
if [[ -z $disk ]]; then
#check pool state only
zpool get -H -o value health $pool | grep -qi "$state"
else
zpool status -v $pool | grep "$disk" | grep -qi "$state"
fi
}
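#
# Usage sketch (disk name hypothetical):
# check_state $TESTPOOL sdb "online" || log_fail "sdb is not online"
# check_state $TESTPOOL "" "degraded" && log_note "pool is degraded"
#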
#
# Get the mountpoint of a snapshot. A snapshot uses
# <mp_filesystem>/.zfs/snapshot/<snap> as its mountpoint.
#
function snapshot_mountpoint
{
typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if [[ $dataset != *@* ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
typeset fs=${dataset%@*}
typeset snap=${dataset#*@}
if [[ -z $fs || -z $snap ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
typeset device="$1"
typeset ashift="$2"
zdb -e -lll $device | awk -v ashift=$ashift '
/ashift: / {
if (ashift != $2)
exit 1;
else
count++;
}
END {
exit (count != 4);
}'
}
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
typeset pool="$1"
typeset filesys="$2"
typeset zdbout="/tmp/zdbout.$$"
shift
shift
typeset dirs=$@
typeset search_path=""
log_note "Calling zdb to verify filesystem '$filesys'"
zfs unmount -a > /dev/null 2>&1
log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
search_path="$search_path -d $dir"
done
fi
log_must zpool import $search_path $pool
if ! zdb -cudi $filesys > $zdbout 2>&1; then
log_note "Output: zdb -cudi $filesys"
cat $zdbout
rm -f $zdbout
log_fail "zdb detected errors with: '$filesys'"
fi
log_must zfs mount -a
log_must rm -rf $zdbout
}
#
# Given a pool, issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
typeset pool=${1:-$TESTPOOL}
log_must zpool scrub $pool
log_must wait_scrubbed $pool
typeset -i cksum=$(zpool status $pool | awk '
!NF { isvdev = 0 }
isvdev { errors += $NF }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
if [[ $cksum != 0 ]]; then
log_must zpool status -v
log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
fi
}
#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
}
#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
get_disklist "-P $1"
}
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
typeset -i TIMEOUT=$1
shift
typeset cpids="$@"
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
ps -p $pid > /dev/null 2>&1 &&
log_must kill -USR1 $pid
done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "spares")
[ $state = $cur_state ]
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_hotspare_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail,removed}
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk)
[ $state = $cur_state ]
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_vdev_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
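#
# Usage sketch (disk name hypothetical): wait up to 30 seconds for a
# reattached disk to come back online:
# log_must wait_vdev_state $TESTPOOL sdb "ONLINE" 30
#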
#
# Check the output of 'zpool status -v <pool>',
# and see if the content of <token> contains the <keyword> specified.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
typeset pool=$1
typeset token=$2
typeset keyword=$3
typeset verbose=${4:-false}
scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
if [[ $verbose == true ]]; then
log_note $scan
fi
echo $scan | grep -qi "$keyword"
}
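#
# Usage sketch:
# check_pool_status $TESTPOOL "scan" "scrub repaired" && log_note "scrubbed"
#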
#
# The following functions are instance of check_pool_status()
# is_pool_resilvering - to check if the pool resilver is in progress
# is_pool_resilvered - to check if the pool resilver is completed
# is_pool_scrubbing - to check if the pool scrub is in progress
# is_pool_scrubbed - to check if the pool scrub is completed
# is_pool_scrub_stopped - to check if the pool scrub is stopped
# is_pool_scrub_paused - to check if the pool scrub has paused
# is_pool_removing - to check if the pool is removing a vdev
# is_pool_removed - to check if the pool remove is completed
# is_pool_discarding - to check if the pool checkpoint is being discarded
# is_pool_replacing - to check if the pool is performing a replacement
#
function is_pool_resilvering #pool <verbose>
{
check_pool_status "$1" "scan" \
"resilver[ ()0-9A-Za-z:_-]* in progress since" $2
}
function is_pool_resilvered #pool <verbose>
{
check_pool_status "$1" "scan" "resilvered " $2
}
function is_pool_scrubbing #pool <verbose>
{
check_pool_status "$1" "scan" "scrub in progress since " $2
}
function is_pool_error_scrubbing #pool <verbose>
{
check_pool_status "$1" "scrub" "error scrub in progress since " $2
return $?
}
function is_pool_scrubbed #pool <verbose>
{
check_pool_status "$1" "scan" "scrub repaired" $2
}
function is_pool_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scan" "scrub canceled" $2
}
function is_pool_error_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scrub" "error scrub canceled on " $2
return $?
}
function is_pool_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scan" "scrub paused since " $2
}
function is_pool_error_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scrub" "error scrub paused since " $2
return $?
}
function is_pool_removing #pool
{
check_pool_status "$1" "remove" "in progress since "
}
function is_pool_removed #pool
{
check_pool_status "$1" "remove" "completed on"
}
function is_pool_discarding #pool
{
check_pool_status "$1" "checkpoint" "discarding"
}
function is_pool_replacing #pool
{
zpool status "$1" | grep -qE 'replacing-[0-9]+'
}
function wait_for_degraded
{
typeset pool=$1
typeset timeout=${2:-30}
typeset t0=$SECONDS
while :; do
[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
log_note "$pool is not yet degraded."
sleep 1
if ((SECONDS - t0 > $timeout)); then
log_note "$pool not degraded after $timeout seconds."
return 1
fi
done
return 0
}
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
typeset pool="foopool$$"
for vdev in $@; do
zero_partitions $vdev
done
poolexists $pool && destroy_pool $pool
create_pool $pool $@
destroy_pool $pool
return 0
}
#/**
# A function to locate free disks on a system or among the given
# disks passed as parameters. It works by excluding disks that are in use
# as swap devices or dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
# Trust provided list, no attempt is made to locate unused devices.
if is_linux || is_freebsd; then
echo "$@"
return
fi
sfi=/tmp/swaplist.$$
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
swap -l > $sfi
dumpadm > $dmpi 2>/dev/null
disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
BEGIN { FS="."; }
/^Specify disk/{
searchdisks=0;
}
{
if (searchdisks && $2 !~ "^$"){
split($2,arr," ");
print arr[1];
}
}
/^AVAILABLE DISK SELECTIONS:/{
searchdisks=1;
}
')}
unused=""
for disk in $disks; do
# Check for mounted
grep -q "${disk}[sp]" /etc/mnttab && continue
# Check for swap
grep -q "${disk}[sp]" $sfi && continue
# check for dump device
grep -q "${disk}[sp]" $dmpi && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
unused_candidates="$unused_candidates $disk"
done
rm $sfi $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
count=0
for disk in $unused_candidates; do
if is_disk_device $DEV_DSKDIR/${disk}s0 && \
[ $count -lt $max_finddisksnum ]; then
unused="$unused $disk"
# do not impose limit if $@ is provided
[[ -z $@ ]] && ((count = count + 1))
fi
done
# finally, return our disk list
echo $unused
}
function add_user_freebsd #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
# Check to see if the user exists.
if id $user > /dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base uid
typeset -i uid=1000
while true; do
pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
case $? in
0) break ;;
# The uid is not unique
65) ((uid += 1)) ;;
*) return 1 ;;
esac
if [[ $uid == 65000 ]]; then
log_fail "No user id available under 65000 for $user"
fi
done
# Silence MOTD
touch $basedir/$user/.hushlogin
return 0
}
#
# Delete the specified user.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must pw userdel $user
fi
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group_freebsd #<group_name>
{
typeset group=$1
# See if the group already exists.
if pw groupshow $group >/dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base gid
typeset -i gid=1000
while true; do
pw groupadd -g $gid -n $group > /dev/null 2>&1
case $? in
0) return 0 ;;
# The gid is not unique
65) ((gid += 1)) ;;
*) return 1 ;;
esac
if [[ $gid == 65000 ]]; then
log_fail "No user id available under 65000 for $group"
fi
done
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group_freebsd #<group_name>
{
typeset group=$1
pw groupdel -n $group > /dev/null 2>&1
case $? in
# Group does not exist, or was deleted successfully.
0|6|65) return 0 ;;
# Name already exists as a group name
9) log_must pw groupdel $group ;;
*) return 1 ;;
esac
return 0
}
function add_user_illumos #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
return 0
}
function del_user_illumos #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
return 0
}
function add_group_illumos #<group_name>
{
typeset group=$1
typeset -i gid=100
while true; do
groupadd -g $gid $group > /dev/null 2>&1
case $? in
0) return 0 ;;
# The gid is not unique
4) ((gid += 1)) ;;
*) return 1 ;;
esac
done
}
function del_group_illumos #<group_name>
{
typeset group=$1
groupmod -n $group $group > /dev/null 2>&1
case $? in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
9) log_must groupdel $group ;;
*) return 1 ;;
esac
}
function add_user_linux #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
# Add new users to the same group as the command line utils.
# This allows them to be run out of the original user's home
# directory as long as it is permissioned to be group readable.
cmd_group=$(stat --format="%G" $(command -v zfs))
log_must usermod -a -G $cmd_group $user
return 0
}
function del_user_linux #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
}
function add_group_linux #<group_name>
{
typeset group=$1
# Let groupadd select the gid, since for many Linux distributions
# ids 1000 and under are reserved.
while true; do
groupadd $group > /dev/null 2>&1
case $? in
0) return 0 ;;
*) return 1 ;;
esac
done
}
function del_group_linux #<group_name>
{
typeset group=$1
getent group $group > /dev/null 2>&1
case $? in
# Group does not exist.
2) return 0 ;;
# Name already exists as a group name
0) log_must groupdel $group ;;
*) return 1 ;;
esac
return 0
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=${3:-"/var/tmp"}
if ((${#group} == 0 || ${#user} == 0)); then
log_fail "group name or user name are not defined."
fi
case "$UNAME" in
FreeBSD)
add_user_freebsd "$group" "$user" "$basedir"
;;
Linux)
add_user_linux "$group" "$user" "$basedir"
;;
*)
add_user_illumos "$group" "$user" "$basedir"
;;
esac
return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
typeset user=$1
typeset basedir=${2:-"/var/tmp"}
if ((${#user} == 0)); then
log_fail "login name is necessary."
fi
case "$UNAME" in
FreeBSD)
del_user_freebsd "$user"
;;
Linux)
del_user_linux "$user"
;;
*)
del_user_illumos "$user"
;;
esac
[[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case "$UNAME" in
FreeBSD)
add_group_freebsd "$group"
;;
Linux)
add_group_linux "$group"
;;
*)
add_group_illumos "$group"
;;
esac
return 0
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case "$UNAME" in
FreeBSD)
del_group_freebsd "$group"
;;
Linux)
del_group_linux "$group"
;;
*)
del_group_illumos "$group"
;;
esac
return 0
}
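#
# Illustrative usage sketch (hypothetical group/user names): a typical
# setup/cleanup pairing for the platform-dispatching helpers above.
#
function example_user_group_lifecycle
{
log_must add_group zgrp
log_must add_user zgrp zusr
# ... run the test body as zusr here, e.g. via user_run ...
log_must del_user zusr
log_must del_group zgrp
}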
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
typeset pool=""
typeset DONT_DESTROY=""
# We check that by deleting the $1 pool, we're not
# going to pull the rug out from other pools. Do this
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# this is a list of file vdevs in $pool whose paths lie within
# the pool we have been asked to destroy
FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
# this is a list of zvol vdevs in $pool that are provided by the
# pool we have been asked to destroy
ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
POOL_FILE_DIRS=$(zpool status -v $pool | \
awk '/\// {print $1}' | \
awk -F/ '!/dev/ {print $2}')
for pooldir in $POOL_FILE_DIRS
do
OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
if [ ! -z "$ZVOLPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ZVOLPOOL on $1"
fi
if [ ! -z "$FILEPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $FILEPOOL on $1"
fi
if [ ! -z "$ALTMOUNTPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
fi
done
if [ -z "${DONT_DESTROY}" ]
then
return 0
else
log_note "Warning: it is not safe to destroy $1!"
return 1
fi
}
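#
# Illustrative sketch: guard a destroy with the check above so we never
# pull the backing store out from under another pool.
#
function example_destroy_if_safe # pool
{
typeset pool=${1:-$TESTPOOL}
if safe_to_destroy_pool $pool; then
log_must zpool destroy -f $pool
fi
}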
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
typeset ops=$1
typeset datatype=$2
typeset dataset=$3
typeset newdataset=$4
if [[ $datatype != "fs" && $datatype != "vol" ]]; then
log_fail "$datatype is not supported."
fi
# check parameters accordingly
case $ops in
create)
newdataset=$dataset
dataset=""
if [[ $datatype == "vol" ]]; then
ops="create -V $VOLSIZE"
fi
;;
clone)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_must snapexists $dataset
;;
rename)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
;;
*)
log_fail "$ops is not supported."
;;
esac
# make sure the upper level filesystem does not exist
destroy_dataset "${newdataset%/*}" "-rRf"
# without -p option, operation will fail
log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
log_must zfs $ops -p $dataset $newdataset
block_device_wait
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redoing the operation should still return zero
if [[ $ops != "rename" ]]; then
log_must zfs $ops -p $dataset $newdataset
fi
return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
typeset pool=$1
typeset config=$2
if ! poolexists "$pool" ; then
return 1
fi
if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
zdb -e $pool
else
zdb -C $pool
fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^ */, "", $2); sub(/^'\''/, "", $2); sub(/'\''$/, "", $2); print $2}'
}
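#
# Illustrative sketch: "pool_guid" is one of the config keys emitted by
# zdb, so the pool GUID can be read like this.
#
function example_log_pool_guid # pool
{
typeset pool=${1:-$TESTPOOL}
log_note "$pool guid: $(get_config $pool pool_guid)"
}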
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
typeset cnt=$1
shift
typeset str="$@"
typeset -i ind
((ind = RANDOM % cnt + 1))
echo "$str" | cut -f $ind -d ' '
}
#
# Randomly select one item from the arguments, with NONE (the empty
# string) as an additional possible result
#
function random_get_with_non
{
typeset -i cnt=$#
((cnt += 1))
_random_get "$cnt" "$@"
}
#
# Randomly select one item from the arguments, excluding the NONE string
#
function random_get
{
_random_get "$#" "$@"
}
#
# Generate a dataset name of at least the specified length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
typeset -i len=$1
typeset basestr="$2"
typeset -i baselen=${#basestr}
typeset -i iter=0
typeset l_name=""
if ((len % baselen == 0)); then
((iter = len / baselen))
else
((iter = len / baselen + 1))
fi
while ((iter > 0)); do
l_name="${l_name}$basestr"
((iter -= 1))
done
echo $l_name
}
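#
# Illustrative sketch (hypothetical dataset): build a long name from a
# short base string for name-length boundary testing. Note the result is
# rounded up to a whole number of repetitions of the base string.
#
function example_long_name
{
typeset name=$(gen_dataset_name 200 "a")
log_must zfs create $TESTPOOL/$name
}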
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
typeset cksum
sync
sync_all_pools
zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset field=${3:-$pool}
zpool status -v "$pool" 2>/dev/null | \
awk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
(startfield==1) && ($1==device) {print $2; exit;}
(startfield==1) &&
($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
}
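#
# Illustrative sketch (hypothetical disk): fail the test unless the given
# vdev reports ONLINE in the pool's main config section.
#
function example_expect_online # pool disk
{
typeset state=$(get_device_state $1 $2)
[[ $state == "ONLINE" ]] || log_fail "$2 is $state, expected ONLINE"
}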
#
# Get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
typeset rootfs=""
if is_freebsd; then
rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
elif ! is_linux; then
rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
if datasetexists $rootfs; then
echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
}
#
# get the rootfs's pool name
# return:
# rootpool name
#
function get_rootpool
{
typeset rootfs=$(get_rootfs)
echo ${rootfs%%/*}
}
#
# Verify that the required number of disks is given
#
function verify_disk_count
{
typeset -i min=${2:-1}
typeset -i count=$(echo "$1" | wc -w)
if ((count < min)); then
log_untested "A minimum of $min disks is required to run." \
" You specified $count disk(s)"
fi
}
function ds_is_volume
{
typeset type=$(get_prop type $1)
[ $type = "volume" ]
}
function ds_is_filesystem
{
typeset type=$(get_prop type $1)
[ $type = "filesystem" ]
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
}
# Return the number of CPUs (cross-platform)
function get_num_cpus
{
if is_linux ; then
grep -c '^processor' /proc/cpuinfo
elif is_freebsd; then
sysctl -n kern.smp.cpus
else
psrinfo | wc -l
fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
[[ $(get_num_cpus) -gt 1 ]]
}
function get_cpu_freq
{
if is_linux; then
lscpu | awk '/CPU MHz/ { print $3 }'
elif is_freebsd; then
sysctl -n hw.clockrate
else
psrinfo -v 0 | awk '/processor operates at/ {print $6}'
fi
}
# Run the given command as the user provided.
function user_run
{
typeset user=$1
shift
log_note "user: $user"
log_note "cmd: $*"
typeset out=$TEST_BASE_DIR/out
typeset err=$TEST_BASE_DIR/err
sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
typeset res=$?
log_note "out: $(<$out)"
log_note "err: $(<$err)"
return $res
}
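#
# Illustrative sketch (hypothetical user): an unprivileged 'zfs list'
# should succeed, since listing requires no delegated permissions.
#
function example_list_as_user
{
log_must user_run zusr "zfs list $TESTPOOL"
}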
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
typeset pool=$1
typeset vdev
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 2
fi
shift
# We could use 'zpool list' to only get the vdevs of the pool but we
# can't reference a mirror/raidz vdev using its ID (i.e. mirror-0),
# therefore we use the 'zpool status' output.
typeset tmpfile=$(mktemp)
zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
for vdev in "$@"; do
grep -wq ${vdev##*/} $tmpfile || return 1
done
rm -f $tmpfile
return 0
}
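#
# Illustrative sketch (hypothetical $DISK1/$DISK2 devices): distinguish
# the three return codes documented above.
#
function example_check_membership
{
vdevs_in_pool $TESTPOOL $DISK1 $DISK2
case $? in
0) log_note "both vdevs present" ;;
1) log_fail "a vdev is missing from $TESTPOOL" ;;
2) log_fail "no pool name given" ;;
esac
}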
function get_max
{
typeset -l i max=$1
shift
for i in "$@"; do
max=$((max > i ? max : i))
done
echo $max
}
# Write data that can be compressed into a directory
function write_compressible
{
typeset dir=$1
typeset megs=$2
typeset nfiles=${3:-1}
typeset bs=${4:-1024k}
typeset fname=${5:-file}
[[ -d $dir ]] || log_fail "No directory: $dir"
# Under Linux fio is not currently used since its behavior can
# differ significantly across versions. This includes missing
# command line options and cases where the --buffer_compress_*
# options fail to behave as expected.
if is_linux; then
typeset file_bytes=$(to_bytes $megs)
typeset bs_bytes=4096
typeset blocks=$(($file_bytes / $bs_bytes))
for (( i = 0; i < $nfiles; i++ )); do
truncate -s $file_bytes $dir/$fname.$i
# Write every third block to get 66% compression.
for (( j = 0; j < $blocks; j += 3 )); do
dd if=/dev/urandom of=$dir/$fname.$i \
seek=$j bs=$bs_bytes count=1 \
conv=notrunc >/dev/null 2>&1
done
done
else
command -v fio > /dev/null || log_unsupported "fio missing"
log_must eval fio \
--name=job \
--fallocate=0 \
--minimal \
--randrepeat=0 \
--buffer_compress_percentage=66 \
--buffer_compress_chunk=4096 \
--directory="$dir" \
--numjobs="$nfiles" \
--nrfiles="$nfiles" \
--rw=write \
--bs="$bs" \
--filesize="$megs" \
"--filename_format='$fname.\$jobnum' >/dev/null"
fi
}
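#
# Illustrative sketch (hypothetical mountpoint): create two 64m files
# that compress to roughly one third of their size.
#
function example_make_compressible
{
log_must write_compressible /$TESTPOOL 64m 2
}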
function get_objnum
{
typeset pathname=$1
typeset objnum
[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
if is_freebsd; then
objnum=$(stat -f "%i" $pathname)
else
objnum=$(stat -c %i $pathname)
fi
echo $objnum
}
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
typeset pool=${1:-$TESTPOOL}
typeset force=${2:-false}
if [[ $force == true ]]; then
log_must zpool sync -f $pool
else
log_must zpool sync $pool
fi
return 0
}
#
# Sync all pools
#
# $1 boolean to force uberblock (and config including zpool cache file) update
#
function sync_all_pools #<force>
{
typeset force=${1:-false}
if [[ $force == true ]]; then
log_must zpool sync -f
else
log_must zpool sync
fi
return 0
}
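#
# Illustrative sketch: force an uberblock update before inspecting
# on-disk state, so zdb sees the latest txg.
#
function example_sync_then_zdb
{
log_must sync_pool $TESTPOOL true
log_must eval "zdb -C $TESTPOOL >/dev/null"
}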
#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
log_must sleep 1
done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
# $2 timeout
#
function wait_replacing #pool timeout
{
typeset timeout=${2:-300}
typeset pool=${1:-$TESTPOOL}
for (( timer = 0; timer < $timeout; timer++ )); do
is_pool_replacing $pool || break;
sleep 1;
done
}
# Wait for a pool to be scrubbed
#
# $1 pool name
# $2 timeout
#
function wait_scrubbed #pool timeout
{
typeset timeout=${2:-300}
typeset pool=${1:-$TESTPOOL}
for (( timer = 0; timer < $timeout; timer++ )); do
is_pool_scrubbed $pool && break;
sleep 1;
done
}
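#
# Illustrative sketch: start a scrub and block until it finishes or the
# default 300 second timeout elapses.
#
function example_scrub_and_wait
{
log_must zpool scrub $TESTPOOL
wait_scrubbed $TESTPOOL
}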
# Backup the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
echo $zedrc_backup
}
function zed_rc_restore
{
mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
log_unsupported "No zed on $UNAME"
fi
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
fi
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Setup minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
log_must umask 0022
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
done
log_must umask $saved_umask
fi
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
for extra_zedlet; do
log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
done
log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
$ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
$ZEDLET_DIR
}
#
# Check if ZED is currently running; if so, returns PIDs
#
function zed_check
{
if ! is_linux; then
return
fi
zedpids="$(pgrep -x zed)"
zedpids2="$(pgrep -x lt-zed)"
echo ${zedpids} ${zedpids2}
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
if ! is_linux; then
return
fi
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
# Verify the ZED is not already running.
zedpids=$(zed_check)
if [ -n "$zedpids" ]; then
# We never, ever, really want to just keep going if zed is
# already running - usually this implies our test cases will
# break very strangely, because whatever we wanted to configure
# zed for won't be listening to our changes in the tmpdir.
log_fail "ZED already running - ${zedpids}"
else
log_note "Starting ZED"
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
"-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
fi
return 0
}
#
# Kill ZED process
#
function zed_stop
{
if ! is_linux; then
return ""
fi
log_note "Stopping ZED"
while true; do
zedpids=$(zed_check)
[ ! -n "$zedpids" ] && break
log_must kill $zedpids
sleep 1
done
return 0
}
#
# Drain all zevents
#
function zed_events_drain
{
while [ $(zpool events -H | wc -l) -ne 0 ]; do
sleep 1
zpool events -c >/dev/null
done
}
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
var="$1"
val="$2"
# Remove the line
cmd="'/$var/d'"
eval sed -i $cmd $ZEDLET_DIR/zed.rc
# Add it at the end
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
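#
# Illustrative sketch (Linux only): a typical private-ZED session using
# the helpers above; all-debug.sh is one of the stock zedlets.
#
function example_zed_session
{
zed_setup all-debug.sh
zed_events_drain
zed_start
# ... generate and verify zevents here ...
zed_stop
zed_cleanup all-debug.sh
}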
#
# Check if the provided device is actively being used as a swap device.
#
function is_swap_inuse
{
typeset device=$1
if [[ -z $device ]] ; then
log_note "No device specified."
return 1
fi
case "$UNAME" in
Linux)
swapon -s | grep -wq $(readlink -f $device)
;;
FreeBSD)
swapctl -l | grep -wq $device
;;
*)
swap -l | grep -wq $device
;;
esac
}
#
# Setup a swap device using the provided device.
#
function swap_setup
{
typeset swapdev=$1
case "$UNAME" in
Linux)
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
;;
FreeBSD)
log_must swapctl -a $swapdev
;;
*)
log_must swap -a $swapdev
;;
esac
return 0
}
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
typeset swapdev=$1
if is_swap_inuse $swapdev; then
if is_linux; then
log_must swapoff $swapdev
elif is_freebsd; then
log_must swapoff $swapdev
else
log_must swap -d $swapdev
fi
fi
return 0
}
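#
# Illustrative sketch (hypothetical zvol, assumed to already exist):
# round-trip a device through swap_setup/swap_cleanup.
#
function example_swap_roundtrip
{
typeset dev=$ZVOL_DEVDIR/$TESTPOOL/swapvol
log_must swap_setup $dev
log_must swap_cleanup $dev
}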
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable64
{
set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable32
{
set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
typeset name="$1"
typeset value="$2"
typeset mdb_cmd="$3"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
log_unsupported "Tunable '$name' is unsupported on $UNAME"
;;
"")
log_fail "Tunable '$name' must be added to tunables.cfg"
;;
*)
;;
esac
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
case "$UNAME" in
Linux)
typeset zfs_tunables="/sys/module/zfs/parameters"
echo "$value" >"$zfs_tunables/$tunable"
;;
FreeBSD)
sysctl vfs.zfs.$tunable=$value
;;
SunOS)
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
;;
esac
}
function save_tunable
{
[[ ! -d $TEST_BASE_DIR ]] && return 1
[[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2
echo "$(get_tunable """$1""")" > "$TEST_BASE_DIR"/tunable-"$1"
}
function restore_tunable
{
[[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1
val="$(cat $TEST_BASE_DIR/tunable-"""$1""")"
set_tunable64 "$1" "$val"
rm $TEST_BASE_DIR/tunable-$1
}
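#
# Illustrative sketch: temporarily shorten TXG_TIMEOUT (a NAME from
# tunables.cfg) around a test body, then put the saved value back.
#
function example_with_short_txg_timeout
{
log_must save_tunable TXG_TIMEOUT
log_must set_tunable32 TXG_TIMEOUT 1
# ... exercise frequent txg syncs here ...
log_must restore_tunable TXG_TIMEOUT
}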
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
get_tunable_impl "$1"
}
function get_tunable_impl
{
typeset name="$1"
typeset module="${2:-zfs}"
typeset check_only="$3"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
if [ -z "$check_only" ] ; then
log_unsupported "Tunable '$name' is unsupported on $UNAME"
else
return 1
fi
;;
"")
if [ -z "$check_only" ] ; then
log_fail "Tunable '$name' must be added to tunables.cfg"
else
return 1
fi
;;
*)
;;
esac
case "$UNAME" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
cat $zfs_tunables/$tunable
;;
FreeBSD)
sysctl -n vfs.zfs.$tunable
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
;;
esac
}
# Does a tunable exist?
#
# $1: Tunable name
function tunable_exists
{
get_tunable_impl $1 "zfs" 1
}
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
typeset file=$1
case "$UNAME" in
FreeBSD)
md5 -q $file
;;
*)
typeset sum _
read -r sum _ < <(md5sum -b $file)
echo $sum
;;
esac
}
#
# Compute SHA256 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function sha256digest
{
typeset file=$1
case "$UNAME" in
FreeBSD)
sha256 -q $file
;;
*)
typeset sum _
read -r sum _ < <(sha256sum -b $file)
echo $sum
;;
esac
}
function new_fs #<args>
{
case "$UNAME" in
FreeBSD)
newfs "$@"
;;
*)
echo y | newfs -v "$@"
;;
esac
}
function stat_size #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %z "$path"
;;
*)
stat -c %s "$path"
;;
esac
}
function stat_mtime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %m "$path"
;;
*)
stat -c %Y "$path"
;;
esac
}
function stat_ctime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %c "$path"
;;
*)
stat -c %Z "$path"
;;
esac
}
function stat_crtime #<path>
{
typeset path=$1
case "$UNAME" in
FreeBSD)
stat -f %B "$path"
;;
*)
stat -c %W "$path"
;;
esac
}
function stat_generation #<path>
{
typeset path=$1
case "$UNAME" in
Linux)
getversion "${path}"
;;
*)
stat -f %v "${path}"
;;
esac
}
# Run a command as if it was being run in a TTY.
#
# Usage:
#
# faketty command
#
function faketty
{
if is_freebsd; then
script -q /dev/null env "$@"
else
script --return --quiet -c "$*" /dev/null
fi
}
#
# Produce a random permutation of the integers in a given range (inclusive).
#
function range_shuffle # begin end
{
typeset -i begin=$1
typeset -i end=$2
seq ${begin} ${end} | sort -R
}
#
# Cross-platform xattr helpers
#
function get_xattr # name path
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
getextattr -qq user "${name}" "${path}"
;;
*)
attr -qg "${name}" "${path}"
;;
esac
}
function set_xattr # name value path
{
typeset name=$1
typeset value=$2
typeset path=$3
case "$UNAME" in
FreeBSD)
setextattr user "${name}" "${value}" "${path}"
;;
*)
attr -qs "${name}" -V "${value}" "${path}"
;;
esac
}
function set_xattr_stdin # name path
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
setextattr -i user "${name}" "${path}"
;;
*)
attr -qs "${name}" "${path}"
;;
esac
}
function rm_xattr # name path
{
typeset name=$1
typeset path=$2
case "$UNAME" in
FreeBSD)
rmextattr -q user "${name}" "${path}"
;;
*)
attr -qr "${name}" "${path}"
;;
esac
}
function ls_xattr # path
{
typeset path=$1
case "$UNAME" in
FreeBSD)
lsextattr -qq user "${path}"
;;
*)
attr -ql "${path}"
;;
esac
}
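#
# Illustrative sketch (hypothetical attribute name and value): a
# set/get/remove round trip through the cross-platform xattr helpers.
#
function example_xattr_roundtrip # path
{
typeset f=$1
log_must set_xattr testattr testval $f
[[ $(get_xattr testattr $f) == "testval" ]] || log_fail "xattr mismatch"
log_must rm_xattr testattr $f
}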
function kstat # stat flags?
{
typeset stat=$1
typeset flags=${2-"-n"}
case "$UNAME" in
FreeBSD)
sysctl $flags kstat.zfs.misc.$stat
;;
Linux)
cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
;;
*)
false
;;
esac
}
function get_arcstat # stat
{
typeset stat=$1
case "$UNAME" in
FreeBSD)
kstat arcstats.$stat
;;
Linux)
kstat arcstats | awk "/$stat/"' { print $3 }'
;;
*)
false
;;
esac
}
function punch_hole # offset length file
{
typeset offset=$1
typeset length=$2
typeset file=$3
case "$UNAME" in
FreeBSD)
truncate -d -o $offset -l $length "$file"
;;
Linux)
fallocate --punch-hole --offset $offset --length $length "$file"
;;
*)
false
;;
esac
}
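#
# Illustrative sketch (hypothetical file): make the middle megabyte of a
# 4 MiB file sparse. Fails on platforms where punch_hole returns false.
#
function example_punch_middle
{
typeset f=$TEST_BASE_DIR/sparse_file
log_must truncate -s 4M $f
log_must punch_hole $((1024 * 1024)) $((1024 * 1024)) $f
}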
function zero_range # offset length file
{
typeset offset=$1
typeset length=$2
typeset file=$3
case "$UNAME" in
Linux)
fallocate --zero-range --offset $offset --length $length "$file"
;;
*)
false
;;
esac
}
#
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1, echo the value after reaching quiescence; if echo is 0,
# print the name of the arcstat we are waiting on instead.
#
function arcstat_quiescence # stat echo
{
typeset stat=$1
typeset echo=$2
typeset do_once=true
if [[ $echo -eq 0 ]]; then
echo "Waiting for arcstat $1 quiescence."
fi
while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
typeset stat1=$(get_arcstat $stat)
sleep 0.5
typeset stat2=$(get_arcstat $stat)
do_once=false
done
if [[ $echo -eq 1 ]]; then
echo $stat2
fi
}
function arcstat_quiescence_noecho # stat
{
typeset stat=$1
arcstat_quiescence $stat 0
}
function arcstat_quiescence_echo # stat
{
typeset stat=$1
arcstat_quiescence $stat 1
}
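#
# Illustrative sketch: "l2_size" is assumed to be a valid arcstat name;
# wait for it to settle and then log the stable value.
#
function example_l2_size_settled
{
typeset sz=$(arcstat_quiescence_echo l2_size)
log_note "l2_size settled at $sz"
}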
#
# Given an array of pids, wait until all processes
# have completed and check their return status.
#
function wait_for_children #children
{
rv=0
children=("$@")
for child in "${children[@]}"
do
child_exit=0
wait ${child} || child_exit=$?
if [ $child_exit -ne 0 ]; then
echo "child ${child} failed with ${child_exit}"
rv=1
fi
done
return $rv
}
#
# Compare two directory trees recursively in a manner similar to diff(1), but
# using rsync. If there are any discrepancies, a summary of the differences is
# output and a non-zero error is returned.
#
# If you're comparing a directory after a ZIL replay, you should set
# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# information).
#
function directory_diff # dir_a dir_b
{
dir_a="$1"
dir_b="$2"
zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
# If one of the directories doesn't exist, return 2. This is to match the
# semantics of diff.
if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
return 2
fi
# Run rsync with --dry-run --itemize-changes to get something akin to diff
# output, but rsync is far more thorough in detecting differences (diff
# doesn't compare file metadata, and cannot handle special files).
#
# Also make sure to filter out non-user.* xattrs when comparing. On
# SELinux-enabled systems the copied tree will probably have different
# SELinux labels.
args=("-nicaAHX" '--filter=-x! user.*' "--delete")
# NOTE: Quite a few rsync builds do not support --crtimes which would be
# necessary to verify that creation times are being maintained properly.
# Unfortunately because of this we cannot use it unconditionally but we can
# check if this rsync build supports it and use it then. This check is
# based on the same check in the rsync test suite (testsuite/crtimes.test).
#
# We check ctimes even with zil_replay=1 because the ZIL does store
# creation times and we should make sure they match (if the creation times
# do not match there is a "c" entry in one of the columns).
if rsync --version | grep -q "[, ] crtimes"; then
args+=("--crtimes")
else
log_note "This rsync package does not support --crtimes (-N)."
fi
# If we are testing a ZIL replay, we need to ignore timestamp changes.
# Unfortunately --no-times doesn't do what we want -- it will still tell
# you if the timestamps don't match but rsync will set the timestamps to
# the current time (leading to an itemised change entry). It's simpler to
# just filter out those lines.
if [ "$zil_replay" -eq 0 ]; then
filter=("cat")
else
# Different rsync versions have different numbers of columns. So just
# require that aside from the first two, all other columns must be
# blank (literal ".") or a timestamp field ("[tT]").
filter=("grep" "-v" '^\..[.Tt]\+ ')
fi
diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
rv=0
if [ -n "$diff" ]; then
echo "$diff"
rv=1
fi
return $rv
}
#
# Compare two directory trees recursively, without checking whether the mtimes
# match (creation times will be checked if the available rsync binary supports
# it). This is necessary for ZIL replay checks (because the ZIL does not
# contain mtimes and thus after a ZIL replay, mtimes won't match).
#
# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
#
function replay_directory_diff # dir_a dir_b
{
LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
}
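#
# Illustrative sketch (hypothetical mountpoints): after a ZIL replay,
# compare the replayed tree against a reference copy, ignoring mtimes.
#
function example_verify_replay
{
log_must replay_directory_diff /$TESTPOOL/reference /$TESTPOOL/replayed
}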
#
# Put coredumps into $1/core.{basename}
#
# Output must be saved and passed to pop_coredump_pattern on cleanup
#
function push_coredump_pattern # dir
{
ulimit -c unlimited
case "$UNAME" in
Linux)
cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
echo 0 >/proc/sys/kernel/core_uses_pid
;;
FreeBSD)
sysctl -n kern.corefile
sysctl kern.corefile="$1/core.%N" >/dev/null
;;
*)
# Nothing to output; set only for this shell
coreadm -p "$1/core.%f"
;;
esac
}
#
# Put coredumps back into the default location
#
function pop_coredump_pattern
{
[ -s "$1" ] || return 0
case "$UNAME" in
Linux)
typeset pat pid
{ read -r pat; read -r pid; } < "$1"
echo "$pat" >/proc/sys/kernel/core_pattern &&
echo "$pid" >/proc/sys/kernel/core_uses_pid
;;
FreeBSD)
sysctl kern.corefile="$(<"$1")" >/dev/null
;;
esac
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/math.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/math.shlib
index da1e77e5fb97..2b5e60180f59 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/math.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/math.shlib
@@ -1,132 +1,143 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
#
#
# Return 0 if the smaller of $a and $b is at least $percent percent of the
# larger, i.e. the two values are within the given percentage of each other.
# Return 1 if the calculated percentage is lower or if we would divide by
# zero. For use like this:
#
# Do $action if the calculated percentage is greater or equal to that passed in:
# within_percent A B P && $action
# Do $action if the calculated percentage is less than that passed in:
# within_percent A B P || $action
#
function within_percent
{
typeset a=$1
typeset b=$1
typeset percent=$3
# Set $a or $b to $2 such that a >= b
[ 1 -eq $(echo "$2 > $a" | bc) ] && a=$2 || b=$2
# Prevent division by 0
[[ $a =~ [1-9] ]] || return 1
typeset p=$(echo "scale=2; $b * 100 / $a" | bc)
log_note "Comparing $a and $b given $percent% (calculated: $p%)"
[ 1 -eq $(echo "scale=2; $p >= $percent" | bc) ]
}
#
# Return 0 if value is within +/-tolerance of target.
# Return 1 if value exceeds our tolerance.
# For use like this:
#
# Do $action if value is within the tolerance from target passed in:
# within_tolerance VAL TAR TOL && $action
# Do $action if value surpasses the tolerance from target passed in:
# within_tolerance VAL TAR TOL || $action
#
function within_tolerance #value #target #tolerance
{
typeset val=$1
typeset target=$2
typeset tol=$3
typeset diff=$((abs(val - target)))
log_note "Checking if $val is within +/-$tol of $target (diff: $diff)"
((diff <= tol))
}
#
# Return 0 if the human readable string of the form <value>[suffix] can
# be converted to bytes. Allowed suffixes are shown in the case statement below.
#
function to_bytes
{
typeset size=$1
typeset value=$(echo "$size" | grep -o '[0-9]\+')
case $size in
*PB|*pb|*P|*p) factor='1024^5' ;;
*TB|*tb|*T|*t) factor='1024^4' ;;
*GB|*gb|*G|*g) factor='1024^3' ;;
*MB|*mb|*M|*m) factor='1024^2' ;;
*KB|*kb|*K|*k) factor='1024^1' ;;
*B|*b) factor='1024^0' ;;
*[!0-9.]*) return 1 ;;
*) factor='1024^0' ;;
esac
echo "$value * ($factor)" | bc
return 0
}
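#
# Illustrative sketch: convert a human-readable size before doing shell
# arithmetic on it; 64M becomes 67108864.
#
function example_to_bytes
{
typeset -i bytes=$(to_bytes 64M)
log_note "64M is $bytes bytes"
}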
#
# Verify $a is equal to $b, otherwise raise an error specifying
# the $type of values being compared
#
function verify_eq # <a> <b> <type>
{
typeset a=$1
typeset b=$2
typeset type=$3
if [[ $a -ne $b ]]; then
log_fail "Compared $type should be equal: $a != $b"
fi
}
#
# Verify $a is not equal to $b, otherwise raise an error specifying
# the $type of values being compared
#
function verify_ne # <a> <b> <type>
{
typeset a=$1
typeset b=$2
typeset type=$3
if [[ $a -eq $b ]]; then
log_fail "Compared $type should be not equal: $a == $b"
fi
}
# A simple function to get a random number between two bounds (inclusive)
#
# Note since we're using $RANDOM, $min+32767 is the largest number we
# can accept as the upper bound.
#
# $1 lower bound
# $2 upper bound
+# [$3 how many]
function random_int_between
{
typeset -i min=$1
typeset -i max=$2
+ typeset -i count
+ typeset -i i
- echo $(( (RANDOM % (max - min + 1)) + min ))
+ if [[ -z "$3" ]]; then
+ count=1
+ else
+ count=$3
+ fi
+
+ for (( i = 0; i < $count; i++ )); do
+ echo $(( (RANDOM % (max - min + 1)) + min ))
+ done
}
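+
+#
+# Illustrative sketch: draw three random block-size exponents in one call
+# using the new optional count argument.
+#
+function example_random_exponents
+{
+ typeset -i e
+ for e in $(random_int_between 9 17 3); do
+ log_note "candidate blocksize: $((2 ** e))"
+ done
+}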
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
index a0edad14d028..718c4cf2d8ab 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
@@ -1,102 +1,103 @@
# This file exports variables for each tunable used in the test suite.
#
# Different platforms use different names for most tunables. To avoid littering
# the tests with conditional logic for deciding how to set each tunable, the
# logic is instead consolidated to this one file.
#
# Any use of tunables in tests must use a name defined here. New entries
# should be added to the table as needed. Please keep the table sorted
# alphabetically for ease of maintenance.
#
# Platform-specific tunables should still use a NAME from this table for
# consistency. Enter UNSUPPORTED in the column for platforms on which the
# tunable is not implemented.
UNAME=$(uname)
# NAME FreeBSD tunable Linux tunable
cat <<%%%% |
ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot
ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount
ARC_MAX arc.max zfs_arc_max
ARC_MIN arc.min zfs_arc_min
ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks
CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second
COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct
COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled
CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms
CONDENSE_INDIRECT_OBSOLETE_PCT condense.indirect_obsolete_pct zfs_condense_indirect_obsolete_pct
CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes
DBUF_CACHE_SHIFT dbuf.cache_shift dbuf_cache_shift
DEADMAN_CHECKTIME_MS deadman.checktime_ms zfs_deadman_checktime_ms
DEADMAN_FAILMODE deadman.failmode zfs_deadman_failmode
DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms
DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms
DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check
DMU_OFFSET_NEXT_SYNC dmu_offset_next_sync zfs_dmu_offset_next_sync
INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
INITIALIZE_VALUE initialize_value zfs_initialize_value
KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export
LUA_MAX_MEMLIMIT lua.max_memlimit zfs_lua_max_memlimit
L2ARC_MFUONLY l2arc.mfuonly l2arc_mfuonly
L2ARC_NOPREFETCH l2arc.noprefetch l2arc_noprefetch
L2ARC_REBUILD_BLOCKS_MIN_L2SIZE l2arc.rebuild_blocks_min_l2size l2arc_rebuild_blocks_min_l2size
L2ARC_REBUILD_ENABLED l2arc.rebuild_enabled l2arc_rebuild_enabled
L2ARC_TRIM_AHEAD l2arc.trim_ahead l2arc_trim_ahead
L2ARC_WRITE_BOOST l2arc.write_boost l2arc_write_boost
L2ARC_WRITE_MAX l2arc.write_max l2arc_write_max
LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc
LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel
LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause
LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel
LIVELIST_CONDENSE_ZTHR_PAUSE livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause
LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries
LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared
MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting
MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds
METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load
METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging
MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals
MULTIHOST_HISTORY multihost.history zfs_multihost_history
MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals
MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
SCAN_LEGACY scan_legacy zfs_scan_legacy
SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation
SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
TXG_HISTORY txg.history zfs_txg_history
TXG_TIMEOUT txg.timeout zfs_txg_timeout
UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress
VDEV_FILE_LOGICAL_ASHIFT vdev.file.logical_ashift vdev_file_logical_ashift
VDEV_FILE_PHYSICAL_ASHIFT vdev.file.physical_ashift vdev_file_physical_ashift
VDEV_MAX_AUTO_ASHIFT vdev.max_auto_ashift zfs_vdev_max_auto_ashift
VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count
VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip
VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev
VOL_MODE vol.mode zvol_volmode
VOL_RECURSIVE vol.recursive UNSUPPORTED
VOL_USE_BLK_MQ UNSUPPORTED zvol_use_blk_mq
-BCLONE_ENABLED zfs_bclone_enabled zfs_bclone_enabled
+BCLONE_ENABLED bclone_enabled zfs_bclone_enabled
+BCLONE_WAIT_DIRTY bclone_wait_dirty zfs_bclone_wait_dirty
XATTR_COMPAT xattr_compat zfs_xattr_compat
ZEVENT_LEN_MAX zevent.len_max zfs_zevent_len_max
ZEVENT_RETAIN_MAX zevent.retain_max zfs_zevent_retain_max
ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms
ZIL_SAXATTR zil_saxattr zfs_zil_saxattr
%%%%
while read name FreeBSD Linux; do
eval "export ${name}=\$${UNAME}"
done
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index 87b50f59ca7a..e2824ee065e8 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -1,2074 +1,2105 @@
CLEANFILES =
dist_noinst_DATA =
include $(top_srcdir)/config/Substfiles.am
datadir_zfs_tests_testsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests
nobase_dist_datadir_zfs_tests_tests_DATA = \
perf/nfs-sample.cfg \
perf/perf.shlib \
\
perf/fio/mkfiles.fio \
perf/fio/random_reads.fio \
perf/fio/random_readwrite.fio \
perf/fio/random_readwrite_fixed.fio \
perf/fio/random_writes.fio \
perf/fio/sequential_reads.fio \
perf/fio/sequential_readwrite.fio \
perf/fio/sequential_writes.fio
nobase_dist_datadir_zfs_tests_tests_SCRIPTS = \
perf/regression/random_reads.ksh \
perf/regression/random_readwrite.ksh \
perf/regression/random_readwrite_fixed.ksh \
perf/regression/random_writes.ksh \
perf/regression/random_writes_zil.ksh \
perf/regression/sequential_reads_arc_cached_clone.ksh \
perf/regression/sequential_reads_arc_cached.ksh \
perf/regression/sequential_reads_dbuf_cached.ksh \
perf/regression/sequential_reads.ksh \
perf/regression/sequential_writes.ksh \
perf/regression/setup.ksh \
\
perf/scripts/prefetch_io.sh
# These lists can be regenerated by running make regen-tests at the root, or, on a *clean* source:
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
#
# simd and tmpfile are Linux-only and not installed elsewhere
#
# C programs are specced in ../Makefile.am above as part of the main Makefile
find_common := find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po'
regen:
@$(MAKE) -C $(top_builddir) clean
@$(MAKE) clean
$(SED) $(ac_inplace) '/^# -- >8 --/q' Makefile.am
echo >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_DATA = \' >> Makefile.am
$(find_common) ! -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \' >> Makefile.am
$(find_common) -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'SUBSTFILES += $$(nobase_nodist_datadir_zfs_tests_tests_DATA) $$(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)' >> Makefile.am
echo >> Makefile.am
echo 'if BUILD_LINUX' >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'endif' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_DATA += \' >> Makefile.am
$(find_common) ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
# -- >8 --
nobase_nodist_datadir_zfs_tests_tests_DATA = \
functional/pam/utilities.kshlib
nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \
functional/pyzfs/pyzfs_unittest.ksh
SUBSTFILES += $(nobase_nodist_datadir_zfs_tests_tests_DATA) $(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)
if BUILD_LINUX
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/simd/simd_supported.ksh \
functional/tmpfile/cleanup.ksh \
functional/tmpfile/setup.ksh
endif
nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/acl/acl.cfg \
functional/acl/acl_common.kshlib \
functional/alloc_class/alloc_class.cfg \
functional/alloc_class/alloc_class.kshlib \
functional/atime/atime.cfg \
functional/atime/atime_common.kshlib \
+ functional/bclone/bclone.cfg \
+ functional/bclone/bclone_common.kshlib \
+ functional/bclone/bclone_corner_cases.kshlib \
functional/block_cloning/block_cloning.kshlib \
functional/cache/cache.cfg \
functional/cache/cache.kshlib \
functional/cachefile/cachefile.cfg \
functional/cachefile/cachefile.kshlib \
functional/casenorm/casenorm.cfg \
functional/casenorm/casenorm.kshlib \
functional/channel_program/channel_common.kshlib \
functional/channel_program/lua_core/tst.args_to_lua.out \
functional/channel_program/lua_core/tst.args_to_lua.zcp \
functional/channel_program/lua_core/tst.divide_by_zero.err \
functional/channel_program/lua_core/tst.divide_by_zero.zcp \
functional/channel_program/lua_core/tst.exists.zcp \
functional/channel_program/lua_core/tst.large_prog.out \
functional/channel_program/lua_core/tst.large_prog.zcp \
functional/channel_program/lua_core/tst.lib_base.lua \
functional/channel_program/lua_core/tst.lib_coroutine.lua \
functional/channel_program/lua_core/tst.lib_strings.lua \
functional/channel_program/lua_core/tst.lib_table.lua \
functional/channel_program/lua_core/tst.nested_neg.zcp \
functional/channel_program/lua_core/tst.nested_pos.zcp \
functional/channel_program/lua_core/tst.recursive.zcp \
functional/channel_program/lua_core/tst.return_large.zcp \
functional/channel_program/lua_core/tst.return_recursive_table.zcp \
functional/channel_program/lua_core/tst.stack_gsub.err \
functional/channel_program/lua_core/tst.stack_gsub.zcp \
functional/channel_program/lua_core/tst.timeout.zcp \
functional/channel_program/synctask_core/tst.bookmark.copy.zcp \
functional/channel_program/synctask_core/tst.bookmark.create.zcp \
functional/channel_program/synctask_core/tst.get_index_props.out \
functional/channel_program/synctask_core/tst.get_index_props.zcp \
functional/channel_program/synctask_core/tst.get_number_props.out \
functional/channel_program/synctask_core/tst.get_number_props.zcp \
functional/channel_program/synctask_core/tst.get_string_props.out \
functional/channel_program/synctask_core/tst.get_string_props.zcp \
functional/channel_program/synctask_core/tst.promote_conflict.zcp \
functional/channel_program/synctask_core/tst.set_props.zcp \
functional/channel_program/synctask_core/tst.snapshot_destroy.zcp \
functional/channel_program/synctask_core/tst.snapshot_neg.zcp \
functional/channel_program/synctask_core/tst.snapshot_recursive.zcp \
functional/channel_program/synctask_core/tst.snapshot_rename.zcp \
functional/channel_program/synctask_core/tst.snapshot_simple.zcp \
functional/checksum/default.cfg \
functional/clean_mirror/clean_mirror_common.kshlib \
functional/clean_mirror/default.cfg \
functional/cli_root/cli_common.kshlib \
functional/cli_root/zfs_copies/zfs_copies.cfg \
functional/cli_root/zfs_copies/zfs_copies.kshlib \
functional/cli_root/zfs_create/properties.kshlib \
functional/cli_root/zfs_create/zfs_create.cfg \
functional/cli_root/zfs_create/zfs_create_common.kshlib \
functional/cli_root/zfs_destroy/zfs_destroy.cfg \
functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib \
functional/cli_root/zfs_get/zfs_get_common.kshlib \
functional/cli_root/zfs_get/zfs_get_list_d.kshlib \
functional/cli_root/zfs_jail/jail.conf \
functional/cli_root/zfs_load-key/HEXKEY \
functional/cli_root/zfs_load-key/PASSPHRASE \
functional/cli_root/zfs_load-key/RAWKEY \
functional/cli_root/zfs_load-key/zfs_load-key.cfg \
functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib \
functional/cli_root/zfs_mount/zfs_mount.cfg \
functional/cli_root/zfs_mount/zfs_mount.kshlib \
functional/cli_root/zfs_promote/zfs_promote.cfg \
functional/cli_root/zfs_receive/zstd_test_data.txt \
functional/cli_root/zfs_rename/zfs_rename.cfg \
functional/cli_root/zfs_rename/zfs_rename.kshlib \
functional/cli_root/zfs_rollback/zfs_rollback.cfg \
functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib \
functional/cli_root/zfs_send/zfs_send.cfg \
functional/cli_root/zfs_set/zfs_set_common.kshlib \
functional/cli_root/zfs_share/zfs_share.cfg \
functional/cli_root/zfs_snapshot/zfs_snapshot.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.kshlib \
functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib \
functional/cli_root/zfs_wait/zfs_wait.kshlib \
functional/cli_root/zpool_add/zpool_add.cfg \
functional/cli_root/zpool_add/zpool_add.kshlib \
functional/cli_root/zpool_clear/zpool_clear.cfg \
functional/cli_root/zpool_create/draidcfg.gz \
functional/cli_root/zpool_create/zpool_create.cfg \
functional/cli_root/zpool_create/zpool_create.shlib \
functional/cli_root/zpool_destroy/zpool_destroy.cfg \
functional/cli_root/zpool_events/zpool_events.cfg \
functional/cli_root/zpool_events/zpool_events.kshlib \
functional/cli_root/zpool_expand/zpool_expand.cfg \
functional/cli_root/zpool_export/zpool_export.cfg \
functional/cli_root/zpool_export/zpool_export.kshlib \
functional/cli_root/zpool_get/vdev_get.cfg \
functional/cli_root/zpool_get/zpool_get.cfg \
functional/cli_root/zpool_get/zpool_get_parsable.cfg \
functional/cli_root/zpool_import/blockfiles/cryptv0.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/missing_ivset.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.shlib \
functional/cli_root/zpool_resilver/zpool_resilver.cfg \
functional/cli_root/zpool_scrub/zpool_scrub.cfg \
functional/cli_root/zpool_split/zpool_split.cfg \
functional/cli_root/zpool_trim/zpool_trim.kshlib \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v10.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v11.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v12.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v13.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v14.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v15.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz21.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz22.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz23.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v4.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v5.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v6.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v7.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v8.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v999.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v9.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-vBROKEN.dat.bz2 \
functional/cli_root/zpool_upgrade/zpool_upgrade.cfg \
functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib \
functional/cli_root/zpool_wait/zpool_wait.kshlib \
functional/cli_root/zhack/library.kshlib \
functional/cli_user/misc/misc.cfg \
functional/cli_user/zfs_list/zfs_list.cfg \
functional/cli_user/zfs_list/zfs_list.kshlib \
functional/compression/compress.cfg \
functional/compression/testpool_zstd.tar.gz \
functional/deadman/deadman.cfg \
functional/delegate/delegate.cfg \
functional/delegate/delegate_common.kshlib \
functional/devices/devices.cfg \
functional/devices/devices_common.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
functional/fault/fault.cfg \
functional/grow/grow.cfg \
functional/history/history.cfg \
functional/history/history_common.kshlib \
functional/history/i386.migratedpool.DAT.Z \
functional/history/i386.orig_history.txt \
functional/history/sparc.migratedpool.DAT.Z \
functional/history/sparc.orig_history.txt \
functional/history/zfs-pool-v4.dat.Z \
functional/inheritance/config001.cfg \
functional/inheritance/config002.cfg \
functional/inheritance/config003.cfg \
functional/inheritance/config004.cfg \
functional/inheritance/config005.cfg \
functional/inheritance/config006.cfg \
functional/inheritance/config007.cfg \
functional/inheritance/config008.cfg \
functional/inheritance/config009.cfg \
functional/inheritance/config010.cfg \
functional/inheritance/config011.cfg \
functional/inheritance/config012.cfg \
functional/inheritance/config013.cfg \
functional/inheritance/config014.cfg \
functional/inheritance/config015.cfg \
functional/inheritance/config016.cfg \
functional/inheritance/config017.cfg \
functional/inheritance/config018.cfg \
functional/inheritance/config019.cfg \
functional/inheritance/config020.cfg \
functional/inheritance/config021.cfg \
functional/inheritance/config022.cfg \
functional/inheritance/config023.cfg \
functional/inheritance/config024.cfg \
functional/inheritance/inherit.kshlib \
functional/inheritance/README.config \
functional/inheritance/README.state \
functional/inheritance/state001.cfg \
functional/inheritance/state002.cfg \
functional/inheritance/state003.cfg \
functional/inheritance/state004.cfg \
functional/inheritance/state005.cfg \
functional/inheritance/state006.cfg \
functional/inheritance/state007.cfg \
functional/inheritance/state008.cfg \
functional/inheritance/state009.cfg \
functional/inheritance/state010.cfg \
functional/inheritance/state011.cfg \
functional/inheritance/state012.cfg \
functional/inheritance/state013.cfg \
functional/inheritance/state014.cfg \
functional/inheritance/state015.cfg \
functional/inheritance/state016.cfg \
functional/inheritance/state017.cfg \
functional/inheritance/state018.cfg \
functional/inheritance/state019.cfg \
functional/inheritance/state020.cfg \
functional/inheritance/state021.cfg \
functional/inheritance/state022.cfg \
functional/inheritance/state023.cfg \
functional/inheritance/state024.cfg \
functional/inuse/inuse.cfg \
functional/io/io.cfg \
functional/l2arc/l2arc.cfg \
functional/largest_pool/largest_pool.cfg \
functional/migration/migration.cfg \
functional/migration/migration.kshlib \
functional/mmap/mmap.cfg \
functional/mmp/mmp.cfg \
functional/mmp/mmp.kshlib \
functional/mv_files/mv_files.cfg \
functional/mv_files/mv_files_common.kshlib \
functional/nopwrite/nopwrite.shlib \
functional/no_space/enospc.cfg \
functional/online_offline/online_offline.cfg \
functional/pool_checkpoint/pool_checkpoint.kshlib \
functional/projectquota/projectquota.cfg \
functional/projectquota/projectquota_common.kshlib \
functional/quota/quota.cfg \
functional/quota/quota.kshlib \
functional/redacted_send/redacted.cfg \
functional/redacted_send/redacted.kshlib \
functional/redundancy/redundancy.cfg \
functional/redundancy/redundancy.kshlib \
functional/refreserv/refreserv.cfg \
functional/removal/removal.kshlib \
functional/replacement/replacement.cfg \
functional/reservation/reservation.cfg \
functional/reservation/reservation.shlib \
functional/rsend/dedup_encrypted_zvol.bz2 \
functional/rsend/dedup_encrypted_zvol.zsend.bz2 \
functional/rsend/dedup.zsend.bz2 \
functional/rsend/fs.tar.gz \
functional/rsend/rsend.cfg \
functional/rsend/rsend.kshlib \
functional/scrub_mirror/default.cfg \
functional/scrub_mirror/scrub_mirror_common.kshlib \
functional/slog/slog.cfg \
functional/slog/slog.kshlib \
functional/snapshot/snapshot.cfg \
functional/snapused/snapused.kshlib \
functional/sparse/sparse.cfg \
functional/trim/trim.cfg \
functional/trim/trim.kshlib \
functional/truncate/truncate.cfg \
functional/upgrade/upgrade_common.kshlib \
functional/user_namespace/user_namespace.cfg \
functional/user_namespace/user_namespace_common.kshlib \
functional/userquota/13709_reproducer.bz2 \
functional/userquota/userquota.cfg \
functional/userquota/userquota_common.kshlib \
functional/vdev_zaps/vdev_zaps.kshlib \
functional/xattr/xattr.cfg \
functional/xattr/xattr_common.kshlib \
functional/zvol/zvol.cfg \
functional/zvol/zvol_cli/zvol_cli.cfg \
functional/zvol/zvol_common.shlib \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC.cfg \
functional/zvol/zvol_misc/zvol_misc_common.kshlib \
functional/zvol/zvol_swap/zvol_swap.cfg \
functional/idmap_mount/idmap_mount.cfg \
functional/idmap_mount/idmap_mount_common.kshlib
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/acl/off/cleanup.ksh \
functional/acl/off/dosmode.ksh \
functional/acl/off/posixmode.ksh \
functional/acl/off/setup.ksh \
functional/acl/posix/cleanup.ksh \
functional/acl/posix/posix_001_pos.ksh \
functional/acl/posix/posix_002_pos.ksh \
functional/acl/posix/posix_003_pos.ksh \
functional/acl/posix/posix_004_pos.ksh \
functional/acl/posix-sa/cleanup.ksh \
functional/acl/posix-sa/posix_001_pos.ksh \
functional/acl/posix-sa/posix_002_pos.ksh \
functional/acl/posix-sa/posix_003_pos.ksh \
functional/acl/posix-sa/posix_004_pos.ksh \
functional/acl/posix-sa/setup.ksh \
functional/acl/posix/setup.ksh \
functional/alloc_class/alloc_class_001_pos.ksh \
functional/alloc_class/alloc_class_002_neg.ksh \
functional/alloc_class/alloc_class_003_pos.ksh \
functional/alloc_class/alloc_class_004_pos.ksh \
functional/alloc_class/alloc_class_005_pos.ksh \
functional/alloc_class/alloc_class_006_pos.ksh \
functional/alloc_class/alloc_class_007_pos.ksh \
functional/alloc_class/alloc_class_008_pos.ksh \
functional/alloc_class/alloc_class_009_pos.ksh \
functional/alloc_class/alloc_class_010_pos.ksh \
functional/alloc_class/alloc_class_011_neg.ksh \
functional/alloc_class/alloc_class_012_pos.ksh \
functional/alloc_class/alloc_class_013_pos.ksh \
functional/alloc_class/alloc_class_014_neg.ksh \
functional/alloc_class/alloc_class_015_pos.ksh \
functional/alloc_class/cleanup.ksh \
functional/alloc_class/setup.ksh \
functional/append/file_append.ksh \
functional/append/threadsappend_001_pos.ksh \
functional/append/cleanup.ksh \
functional/append/setup.ksh \
functional/arc/arcstats_runtime_tuning.ksh \
functional/arc/cleanup.ksh \
functional/arc/dbufstats_001_pos.ksh \
functional/arc/dbufstats_002_pos.ksh \
functional/arc/dbufstats_003_pos.ksh \
functional/arc/setup.ksh \
functional/atime/atime_001_pos.ksh \
functional/atime/atime_002_neg.ksh \
functional/atime/atime_003_pos.ksh \
functional/atime/cleanup.ksh \
functional/atime/root_atime_off.ksh \
functional/atime/root_atime_on.ksh \
functional/atime/root_relatime_on.ksh \
functional/atime/setup.ksh \
+ functional/bclone/bclone_crossfs_corner_cases.ksh \
+ functional/bclone/bclone_crossfs_corner_cases_limited.ksh \
+ functional/bclone/bclone_crossfs_data.ksh \
+ functional/bclone/bclone_crossfs_embedded.ksh \
+ functional/bclone/bclone_crossfs_hole.ksh \
+ functional/bclone/bclone_diffprops_all.ksh \
+ functional/bclone/bclone_diffprops_checksum.ksh \
+ functional/bclone/bclone_diffprops_compress.ksh \
+ functional/bclone/bclone_diffprops_copies.ksh \
+ functional/bclone/bclone_diffprops_recordsize.ksh \
+ functional/bclone/bclone_prop_sync.ksh \
+ functional/bclone/bclone_samefs_corner_cases.ksh \
+ functional/bclone/bclone_samefs_corner_cases_limited.ksh \
+ functional/bclone/bclone_samefs_data.ksh \
+ functional/bclone/bclone_samefs_embedded.ksh \
+ functional/bclone/bclone_samefs_hole.ksh \
+ functional/bclone/cleanup.ksh \
+ functional/bclone/setup.ksh \
functional/block_cloning/cleanup.ksh \
functional/block_cloning/setup.ksh \
+ functional/block_cloning/block_cloning_clone_mmap_cached.ksh \
+ functional/block_cloning/block_cloning_clone_mmap_write.ksh \
functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh \
functional/block_cloning/block_cloning_copyfilerange_fallback.ksh \
functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh \
functional/block_cloning/block_cloning_copyfilerange.ksh \
functional/block_cloning/block_cloning_copyfilerange_partial.ksh \
functional/block_cloning/block_cloning_disabled_copyfilerange.ksh \
functional/block_cloning/block_cloning_disabled_ficlone.ksh \
functional/block_cloning/block_cloning_disabled_ficlonerange.ksh \
functional/block_cloning/block_cloning_ficlone.ksh \
functional/block_cloning/block_cloning_ficlonerange.ksh \
functional/block_cloning/block_cloning_ficlonerange_partial.ksh \
+ functional/block_cloning/block_cloning_cross_enc_dataset.ksh \
+ functional/block_cloning/block_cloning_replay.ksh \
+ functional/block_cloning/block_cloning_replay_encrypted.ksh \
+ functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
functional/bootfs/bootfs_004_neg.ksh \
functional/bootfs/bootfs_005_neg.ksh \
functional/bootfs/bootfs_006_pos.ksh \
functional/bootfs/bootfs_007_pos.ksh \
functional/bootfs/bootfs_008_pos.ksh \
functional/bootfs/cleanup.ksh \
functional/bootfs/setup.ksh \
functional/btree/btree_negative.ksh \
functional/btree/btree_positive.ksh \
functional/cache/cache_001_pos.ksh \
functional/cache/cache_002_pos.ksh \
functional/cache/cache_003_pos.ksh \
functional/cache/cache_004_neg.ksh \
functional/cache/cache_005_neg.ksh \
functional/cache/cache_006_pos.ksh \
functional/cache/cache_007_neg.ksh \
functional/cache/cache_008_neg.ksh \
functional/cache/cache_009_pos.ksh \
functional/cache/cache_010_pos.ksh \
functional/cache/cache_011_pos.ksh \
functional/cache/cache_012_pos.ksh \
functional/cache/cleanup.ksh \
functional/cachefile/cachefile_001_pos.ksh \
functional/cachefile/cachefile_002_pos.ksh \
functional/cachefile/cachefile_003_pos.ksh \
functional/cachefile/cachefile_004_pos.ksh \
functional/cachefile/cleanup.ksh \
functional/cachefile/setup.ksh \
functional/cache/setup.ksh \
functional/casenorm/case_all_values.ksh \
functional/casenorm/cleanup.ksh \
functional/casenorm/insensitive_formd_delete.ksh \
functional/casenorm/insensitive_formd_lookup.ksh \
functional/casenorm/insensitive_none_delete.ksh \
functional/casenorm/insensitive_none_lookup.ksh \
functional/casenorm/mixed_create_failure.ksh \
functional/casenorm/mixed_formd_delete.ksh \
functional/casenorm/mixed_formd_lookup_ci.ksh \
functional/casenorm/mixed_formd_lookup.ksh \
functional/casenorm/mixed_none_delete.ksh \
functional/casenorm/mixed_none_lookup_ci.ksh \
functional/casenorm/mixed_none_lookup.ksh \
functional/casenorm/norm_all_values.ksh \
functional/casenorm/sensitive_formd_delete.ksh \
functional/casenorm/sensitive_formd_lookup.ksh \
functional/casenorm/sensitive_none_delete.ksh \
functional/casenorm/sensitive_none_lookup.ksh \
functional/casenorm/setup.ksh \
functional/channel_program/lua_core/cleanup.ksh \
functional/channel_program/lua_core/setup.ksh \
functional/channel_program/lua_core/tst.args_to_lua.ksh \
functional/channel_program/lua_core/tst.divide_by_zero.ksh \
functional/channel_program/lua_core/tst.exists.ksh \
functional/channel_program/lua_core/tst.integer_illegal.ksh \
functional/channel_program/lua_core/tst.integer_overflow.ksh \
functional/channel_program/lua_core/tst.language_functions_neg.ksh \
functional/channel_program/lua_core/tst.language_functions_pos.ksh \
functional/channel_program/lua_core/tst.large_prog.ksh \
functional/channel_program/lua_core/tst.libraries.ksh \
functional/channel_program/lua_core/tst.memory_limit.ksh \
functional/channel_program/lua_core/tst.nested_neg.ksh \
functional/channel_program/lua_core/tst.nested_pos.ksh \
functional/channel_program/lua_core/tst.nvlist_to_lua.ksh \
functional/channel_program/lua_core/tst.recursive_neg.ksh \
functional/channel_program/lua_core/tst.recursive_pos.ksh \
functional/channel_program/lua_core/tst.return_large.ksh \
functional/channel_program/lua_core/tst.return_nvlist_neg.ksh \
functional/channel_program/lua_core/tst.return_nvlist_pos.ksh \
functional/channel_program/lua_core/tst.return_recursive_table.ksh \
functional/channel_program/lua_core/tst.stack_gsub.ksh \
functional/channel_program/lua_core/tst.timeout.ksh \
functional/channel_program/synctask_core/cleanup.ksh \
functional/channel_program/synctask_core/setup.ksh \
functional/channel_program/synctask_core/tst.bookmark.copy.ksh \
functional/channel_program/synctask_core/tst.bookmark.create.ksh \
functional/channel_program/synctask_core/tst.destroy_fs.ksh \
functional/channel_program/synctask_core/tst.destroy_snap.ksh \
functional/channel_program/synctask_core/tst.get_count_and_limit.ksh \
functional/channel_program/synctask_core/tst.get_index_props.ksh \
functional/channel_program/synctask_core/tst.get_mountpoint.ksh \
functional/channel_program/synctask_core/tst.get_neg.ksh \
functional/channel_program/synctask_core/tst.get_number_props.ksh \
functional/channel_program/synctask_core/tst.get_string_props.ksh \
functional/channel_program/synctask_core/tst.get_type.ksh \
functional/channel_program/synctask_core/tst.get_userquota.ksh \
functional/channel_program/synctask_core/tst.get_written.ksh \
functional/channel_program/synctask_core/tst.inherit.ksh \
functional/channel_program/synctask_core/tst.list_bookmarks.ksh \
functional/channel_program/synctask_core/tst.list_children.ksh \
functional/channel_program/synctask_core/tst.list_clones.ksh \
functional/channel_program/synctask_core/tst.list_holds.ksh \
functional/channel_program/synctask_core/tst.list_snapshots.ksh \
functional/channel_program/synctask_core/tst.list_system_props.ksh \
functional/channel_program/synctask_core/tst.list_user_props.ksh \
functional/channel_program/synctask_core/tst.parse_args_neg.ksh \
functional/channel_program/synctask_core/tst.promote_conflict.ksh \
functional/channel_program/synctask_core/tst.promote_multiple.ksh \
functional/channel_program/synctask_core/tst.promote_simple.ksh \
functional/channel_program/synctask_core/tst.rollback_mult.ksh \
functional/channel_program/synctask_core/tst.rollback_one.ksh \
functional/channel_program/synctask_core/tst.set_props.ksh \
functional/channel_program/synctask_core/tst.snapshot_destroy.ksh \
functional/channel_program/synctask_core/tst.snapshot_neg.ksh \
functional/channel_program/synctask_core/tst.snapshot_recursive.ksh \
functional/channel_program/synctask_core/tst.snapshot_rename.ksh \
functional/channel_program/synctask_core/tst.snapshot_simple.ksh \
functional/channel_program/synctask_core/tst.terminate_by_signal.ksh \
functional/chattr/chattr_001_pos.ksh \
functional/chattr/chattr_002_neg.ksh \
functional/chattr/cleanup.ksh \
functional/chattr/setup.ksh \
functional/checksum/cleanup.ksh \
functional/checksum/filetest_001_pos.ksh \
functional/checksum/filetest_002_pos.ksh \
functional/checksum/run_blake3_test.ksh \
functional/checksum/run_edonr_test.ksh \
functional/checksum/run_sha2_test.ksh \
functional/checksum/run_skein_test.ksh \
functional/checksum/setup.ksh \
functional/clean_mirror/clean_mirror_001_pos.ksh \
functional/clean_mirror/clean_mirror_002_pos.ksh \
functional/clean_mirror/clean_mirror_003_pos.ksh \
functional/clean_mirror/clean_mirror_004_pos.ksh \
functional/clean_mirror/cleanup.ksh \
functional/clean_mirror/setup.ksh \
functional/cli_root/zdb/zdb_002_pos.ksh \
functional/cli_root/zdb/zdb_003_pos.ksh \
functional/cli_root/zdb/zdb_004_pos.ksh \
functional/cli_root/zdb/zdb_005_pos.ksh \
functional/cli_root/zdb/zdb_006_pos.ksh \
functional/cli_root/zdb/zdb_args_neg.ksh \
functional/cli_root/zdb/zdb_args_pos.ksh \
functional/cli_root/zdb/zdb_backup.ksh \
functional/cli_root/zdb/zdb_block_size_histogram.ksh \
functional/cli_root/zdb/zdb_checksum.ksh \
functional/cli_root/zdb/zdb_decompress.ksh \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
functional/cli_root/zdb/zdb_objset_id.ksh \
functional/cli_root/zdb/zdb_recover_2.ksh \
functional/cli_root/zdb/zdb_recover.ksh \
functional/cli_root/zfs_bookmark/cleanup.ksh \
functional/cli_root/zfs_bookmark/setup.ksh \
functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh \
functional/cli_root/zfs_change-key/cleanup.ksh \
functional/cli_root/zfs_change-key/setup.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_child.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_clones.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_format.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_inherit.ksh \
functional/cli_root/zfs_change-key/zfs_change-key.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_load.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_location.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_pbkdf2iters.ksh \
functional/cli_root/zfs/cleanup.ksh \
functional/cli_root/zfs_clone/cleanup.ksh \
functional/cli_root/zfs_clone/setup.ksh \
functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_deeply_nested.ksh \
functional/cli_root/zfs_clone/zfs_clone_encrypted.ksh \
functional/cli_root/zfs_clone/zfs_clone_rm_nested.ksh \
functional/cli_root/zfs_copies/cleanup.ksh \
functional/cli_root/zfs_copies/setup.ksh \
functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh \
functional/cli_root/zfs_create/cleanup.ksh \
functional/cli_root/zfs_create/setup.ksh \
functional/cli_root/zfs_create/zfs_create_001_pos.ksh \
functional/cli_root/zfs_create/zfs_create_002_pos.ksh \
functional/cli_root/zfs_create/zfs_create_003_pos.ksh \
functional/cli_root/zfs_create/zfs_create_004_pos.ksh \
functional/cli_root/zfs_create/zfs_create_005_pos.ksh \
functional/cli_root/zfs_create/zfs_create_006_pos.ksh \
functional/cli_root/zfs_create/zfs_create_007_pos.ksh \
functional/cli_root/zfs_create/zfs_create_008_neg.ksh \
functional/cli_root/zfs_create/zfs_create_009_neg.ksh \
functional/cli_root/zfs_create/zfs_create_010_neg.ksh \
functional/cli_root/zfs_create/zfs_create_011_pos.ksh \
functional/cli_root/zfs_create/zfs_create_012_pos.ksh \
functional/cli_root/zfs_create/zfs_create_013_pos.ksh \
functional/cli_root/zfs_create/zfs_create_014_pos.ksh \
functional/cli_root/zfs_create/zfs_create_crypt_combos.ksh \
functional/cli_root/zfs_create/zfs_create_dryrun.ksh \
functional/cli_root/zfs_create/zfs_create_encrypted.ksh \
functional/cli_root/zfs_create/zfs_create_nomount.ksh \
functional/cli_root/zfs_create/zfs_create_verbose.ksh \
functional/cli_root/zfs_destroy/cleanup.ksh \
functional/cli_root/zfs_destroy/setup.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal.ksh \
functional/cli_root/zfs_diff/cleanup.ksh \
functional/cli_root/zfs_diff/setup.ksh \
functional/cli_root/zfs_diff/zfs_diff_changes.ksh \
functional/cli_root/zfs_diff/zfs_diff_cliargs.ksh \
functional/cli_root/zfs_diff/zfs_diff_encrypted.ksh \
functional/cli_root/zfs_diff/zfs_diff_mangle.ksh \
functional/cli_root/zfs_diff/zfs_diff_timestamp.ksh \
functional/cli_root/zfs_diff/zfs_diff_types.ksh \
functional/cli_root/zfs_get/cleanup.ksh \
functional/cli_root/zfs_get/setup.ksh \
functional/cli_root/zfs_get/zfs_get_001_pos.ksh \
functional/cli_root/zfs_get/zfs_get_002_pos.ksh \
functional/cli_root/zfs_get/zfs_get_003_pos.ksh \
functional/cli_root/zfs_get/zfs_get_004_pos.ksh \
functional/cli_root/zfs_get/zfs_get_005_neg.ksh \
functional/cli_root/zfs_get/zfs_get_006_neg.ksh \
functional/cli_root/zfs_get/zfs_get_007_neg.ksh \
functional/cli_root/zfs_get/zfs_get_008_pos.ksh \
functional/cli_root/zfs_get/zfs_get_009_pos.ksh \
functional/cli_root/zfs_get/zfs_get_010_neg.ksh \
functional/cli_root/zfs_ids_to_path/cleanup.ksh \
functional/cli_root/zfs_ids_to_path/setup.ksh \
functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh \
functional/cli_root/zfs_inherit/cleanup.ksh \
functional/cli_root/zfs_inherit/setup.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_mountpoint.ksh \
functional/cli_root/zfs_jail/cleanup.ksh \
functional/cli_root/zfs_jail/setup.ksh \
functional/cli_root/zfs_jail/zfs_jail_001_pos.ksh \
functional/cli_root/zfs_load-key/cleanup.ksh \
functional/cli_root/zfs_load-key/setup.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_all.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_file.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_https.ksh \
functional/cli_root/zfs_load-key/zfs_load-key.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_location.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_noop.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh \
functional/cli_root/zfs_mount/cleanup.ksh \
functional/cli_root/zfs_mount/setup.ksh \
functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_012_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh \
functional/cli_root/zfs_mount/zfs_mount_encrypted.ksh \
functional/cli_root/zfs_mount/zfs_mount_remount.ksh \
functional/cli_root/zfs_mount/zfs_mount_test_race.ksh \
functional/cli_root/zfs_mount/zfs_multi_mount.ksh \
functional/cli_root/zfs_program/cleanup.ksh \
functional/cli_root/zfs_program/setup.ksh \
functional/cli_root/zfs_program/zfs_program_json.ksh \
functional/cli_root/zfs_promote/cleanup.ksh \
functional/cli_root/zfs_promote/setup.ksh \
functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_encryptionroot.ksh \
functional/cli_root/zfs_property/cleanup.ksh \
functional/cli_root/zfs_property/setup.ksh \
functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh \
functional/cli_root/zfs_receive/cleanup.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_aliases.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_override.ksh \
functional/cli_root/zfs_receive/setup.ksh \
functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_013_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_015_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_016_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_-e.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_zstd.ksh \
functional/cli_root/zfs_receive/zfs_receive_new_props.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_-d.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_incremental.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw.ksh \
functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh \
functional/cli_root/zfs_receive/zfs_receive_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_compressed_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_large_block_corrective.ksh \
functional/cli_root/zfs_rename/cleanup.ksh \
functional/cli_root/zfs_rename/setup.ksh \
functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_encrypted_child.ksh \
functional/cli_root/zfs_rename/zfs_rename_mountpoint.ksh \
functional/cli_root/zfs_rename/zfs_rename_nounmount.ksh \
functional/cli_root/zfs_rename/zfs_rename_to_encrypted.ksh \
functional/cli_root/zfs_reservation/cleanup.ksh \
functional/cli_root/zfs_reservation/setup.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh \
functional/cli_root/zfs_rollback/cleanup.ksh \
functional/cli_root/zfs_rollback/setup.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh \
functional/cli_root/zfs_send/cleanup.ksh \
functional/cli_root/zfs_send/setup.ksh \
functional/cli_root/zfs_send/zfs_send_001_pos.ksh \
functional/cli_root/zfs_send/zfs_send_002_pos.ksh \
functional/cli_root/zfs_send/zfs_send_003_pos.ksh \
functional/cli_root/zfs_send/zfs_send_004_neg.ksh \
functional/cli_root/zfs_send/zfs_send_005_pos.ksh \
functional/cli_root/zfs_send/zfs_send_006_pos.ksh \
functional/cli_root/zfs_send/zfs_send_007_pos.ksh \
functional/cli_root/zfs_send/zfs_send-b.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted_unloaded.ksh \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
functional/cli_root/zfs_set/canmount_002_pos.ksh \
functional/cli_root/zfs_set/canmount_003_pos.ksh \
functional/cli_root/zfs_set/canmount_004_pos.ksh \
functional/cli_root/zfs_set/checksum_001_pos.ksh \
functional/cli_root/zfs_set/cleanup.ksh \
functional/cli_root/zfs_set/compression_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_002_pos.ksh \
functional/cli_root/zfs_set/mountpoint_003_pos.ksh \
functional/cli_root/zfs_set/onoffs_001_pos.ksh \
functional/cli_root/zfs_set/property_alias_001_pos.ksh \
functional/cli_root/zfs_set/readonly_001_pos.ksh \
functional/cli_root/zfs_set/reservation_001_neg.ksh \
functional/cli_root/zfs_set/ro_props_001_pos.ksh \
functional/cli_root/zfs_set/setup.ksh \
functional/cli_root/zfs_set/share_mount_001_neg.ksh \
functional/cli_root/zfs_set/snapdir_001_pos.ksh \
functional/cli_root/zfs/setup.ksh \
functional/cli_root/zfs_set/user_property_001_pos.ksh \
functional/cli_root/zfs_set/user_property_002_pos.ksh \
functional/cli_root/zfs_set/user_property_003_neg.ksh \
functional/cli_root/zfs_set/user_property_004_pos.ksh \
functional/cli_root/zfs_set/version_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_002_neg.ksh \
functional/cli_root/zfs_set/zfs_set_003_neg.ksh \
functional/cli_root/zfs_set/zfs_set_feature_activation.ksh \
functional/cli_root/zfs_set/zfs_set_keylocation.ksh \
functional/cli_root/zfs_set/zfs_set_nomount.ksh \
functional/cli_root/zfs_share/cleanup.ksh \
functional/cli_root/zfs_share/setup.ksh \
functional/cli_root/zfs_share/zfs_share_001_pos.ksh \
functional/cli_root/zfs_share/zfs_share_002_pos.ksh \
functional/cli_root/zfs_share/zfs_share_003_pos.ksh \
functional/cli_root/zfs_share/zfs_share_004_pos.ksh \
functional/cli_root/zfs_share/zfs_share_005_pos.ksh \
functional/cli_root/zfs_share/zfs_share_006_pos.ksh \
functional/cli_root/zfs_share/zfs_share_007_neg.ksh \
functional/cli_root/zfs_share/zfs_share_008_neg.ksh \
functional/cli_root/zfs_share/zfs_share_009_neg.ksh \
functional/cli_root/zfs_share/zfs_share_010_neg.ksh \
functional/cli_root/zfs_share/zfs_share_011_pos.ksh \
functional/cli_root/zfs_share/zfs_share_012_pos.ksh \
functional/cli_root/zfs_share/zfs_share_013_pos.ksh \
functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh \
+ functional/cli_root/zfs_share/zfs_share_after_mount.ksh \
functional/cli_root/zfs_snapshot/cleanup.ksh \
functional/cli_root/zfs_snapshot/setup.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh \
functional/cli_root/zfs_sysfs/cleanup.ksh \
functional/cli_root/zfs_sysfs/setup.ksh \
functional/cli_root/zfs_sysfs/zfeature_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_sysfs_live.ksh \
functional/cli_root/zfs_sysfs/zpool_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zpool_set_unsupported.ksh \
functional/cli_root/zfs_unload-key/cleanup.ksh \
functional/cli_root/zfs_unload-key/setup.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_all.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_recursive.ksh \
functional/cli_root/zfs_unmount/cleanup.ksh \
functional/cli_root/zfs_unmount/setup.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_nested.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_unload_keys.ksh \
functional/cli_root/zfs_unshare/cleanup.ksh \
functional/cli_root/zfs_unshare/setup.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_007_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_008_pos.ksh \
functional/cli_root/zfs_upgrade/cleanup.ksh \
functional/cli_root/zfs_upgrade/setup.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh \
functional/cli_root/zfs_wait/cleanup.ksh \
functional/cli_root/zfs_wait/setup.ksh \
functional/cli_root/zfs_wait/zfs_wait_deleteq.ksh \
functional/cli_root/zfs_wait/zfs_wait_getsubopt.ksh \
functional/cli_root/zfs/zfs_001_neg.ksh \
functional/cli_root/zfs/zfs_002_pos.ksh \
functional/cli_root/zfs/zfs_003_neg.ksh \
functional/cli_root/zhack/zhack_label_repair_001.ksh \
functional/cli_root/zhack/zhack_label_repair_002.ksh \
functional/cli_root/zhack/zhack_label_repair_003.ksh \
functional/cli_root/zhack/zhack_label_repair_004.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
functional/cli_root/zpool_add/cleanup.ksh \
functional/cli_root/zpool_add/setup.ksh \
functional/cli_root/zpool_add/zpool_add_001_pos.ksh \
functional/cli_root/zpool_add/zpool_add_002_pos.ksh \
functional/cli_root/zpool_add/zpool_add_003_pos.ksh \
functional/cli_root/zpool_add/zpool_add_004_pos.ksh \
functional/cli_root/zpool_add/zpool_add_005_pos.ksh \
functional/cli_root/zpool_add/zpool_add_006_pos.ksh \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
functional/cli_root/zpool_attach/setup.ksh \
functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh \
functional/cli_root/zpool/cleanup.ksh \
functional/cli_root/zpool_clear/cleanup.ksh \
functional/cli_root/zpool_clear/setup.ksh \
functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh \
functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_readonly.ksh \
functional/cli_root/zpool_create/cleanup.ksh \
functional/cli_root/zpool_create/create-o_ashift.ksh \
functional/cli_root/zpool_create/setup.ksh \
functional/cli_root/zpool_create/zpool_create_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_007_neg.ksh \
functional/cli_root/zpool_create/zpool_create_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_009_neg.ksh \
functional/cli_root/zpool_create/zpool_create_010_neg.ksh \
functional/cli_root/zpool_create/zpool_create_011_neg.ksh \
functional/cli_root/zpool_create/zpool_create_012_neg.ksh \
functional/cli_root/zpool_create/zpool_create_014_neg.ksh \
functional/cli_root/zpool_create/zpool_create_015_neg.ksh \
functional/cli_root/zpool_create/zpool_create_016_pos.ksh \
functional/cli_root/zpool_create/zpool_create_017_neg.ksh \
functional/cli_root/zpool_create/zpool_create_018_pos.ksh \
functional/cli_root/zpool_create/zpool_create_019_pos.ksh \
functional/cli_root/zpool_create/zpool_create_020_pos.ksh \
functional/cli_root/zpool_create/zpool_create_021_pos.ksh \
functional/cli_root/zpool_create/zpool_create_022_pos.ksh \
functional/cli_root/zpool_create/zpool_create_023_neg.ksh \
functional/cli_root/zpool_create/zpool_create_024_pos.ksh \
functional/cli_root/zpool_create/zpool_create_crypt_combos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh \
functional/cli_root/zpool_create/zpool_create_encrypted.ksh \
functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh \
functional/cli_root/zpool_create/zpool_create_features_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_007_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh \
functional/cli_root/zpool_create/zpool_create_tempname.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh \
functional/cli_root/zpool_detach/cleanup.ksh \
functional/cli_root/zpool_detach/setup.ksh \
functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh \
functional/cli_root/zpool_events/cleanup.ksh \
functional/cli_root/zpool_events/setup.ksh \
functional/cli_root/zpool_events/zpool_events_clear.ksh \
functional/cli_root/zpool_events/zpool_events_clear_retained.ksh \
functional/cli_root/zpool_events/zpool_events_cliargs.ksh \
functional/cli_root/zpool_events/zpool_events_duplicates.ksh \
functional/cli_root/zpool_events/zpool_events_errors.ksh \
functional/cli_root/zpool_events/zpool_events_follow.ksh \
functional/cli_root/zpool_events/zpool_events_poolname.ksh \
functional/cli_root/zpool_expand/cleanup.ksh \
functional/cli_root/zpool_expand/setup.ksh \
functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh \
functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_005_pos.ksh \
functional/cli_root/zpool_export/cleanup.ksh \
functional/cli_root/zpool_export/setup.ksh \
functional/cli_root/zpool_export/zpool_export_001_pos.ksh \
functional/cli_root/zpool_export/zpool_export_002_pos.ksh \
functional/cli_root/zpool_export/zpool_export_003_neg.ksh \
functional/cli_root/zpool_export/zpool_export_004_pos.ksh \
functional/cli_root/zpool_get/cleanup.ksh \
functional/cli_root/zpool_get/setup.ksh \
functional/cli_root/zpool_get/vdev_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_002_pos.ksh \
functional/cli_root/zpool_get/zpool_get_003_pos.ksh \
functional/cli_root/zpool_get/zpool_get_004_neg.ksh \
functional/cli_root/zpool_get/zpool_get_005_pos.ksh \
functional/cli_root/zpool_history/cleanup.ksh \
functional/cli_root/zpool_history/setup.ksh \
functional/cli_root/zpool_history/zpool_history_001_neg.ksh \
functional/cli_root/zpool_history/zpool_history_002_pos.ksh \
functional/cli_root/zpool_import/cleanup.ksh \
functional/cli_root/zpool_import/import_cachefile_device_added.ksh \
functional/cli_root/zpool_import/import_cachefile_device_removed.ksh \
functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_attached.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_detached.ksh \
functional/cli_root/zpool_import/import_cachefile_paths_changed.ksh \
functional/cli_root/zpool_import/import_cachefile_shared_device.ksh \
functional/cli_root/zpool_import/import_devices_missing.ksh \
functional/cli_root/zpool_import/import_log_missing.ksh \
functional/cli_root/zpool_import/import_paths_changed.ksh \
functional/cli_root/zpool_import/import_rewind_config_changed.ksh \
functional/cli_root/zpool_import/import_rewind_device_replaced.ksh \
functional/cli_root/zpool_import/setup.ksh \
functional/cli_root/zpool_import/zpool_import_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_004_pos.ksh \
functional/cli_root/zpool_import/zpool_import_005_pos.ksh \
functional/cli_root/zpool_import/zpool_import_006_pos.ksh \
functional/cli_root/zpool_import/zpool_import_007_pos.ksh \
functional/cli_root/zpool_import/zpool_import_008_pos.ksh \
functional/cli_root/zpool_import/zpool_import_009_neg.ksh \
functional/cli_root/zpool_import/zpool_import_010_pos.ksh \
functional/cli_root/zpool_import/zpool_import_011_neg.ksh \
functional/cli_root/zpool_import/zpool_import_012_pos.ksh \
functional/cli_root/zpool_import/zpool_import_013_neg.ksh \
functional/cli_root/zpool_import/zpool_import_014_pos.ksh \
functional/cli_root/zpool_import/zpool_import_015_pos.ksh \
functional/cli_root/zpool_import/zpool_import_016_pos.ksh \
functional/cli_root/zpool_import/zpool_import_017_pos.ksh \
functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted_load.ksh \
functional/cli_root/zpool_import/zpool_import_errata3.ksh \
functional/cli_root/zpool_import/zpool_import_errata4.ksh \
functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh \
functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_unclean_export.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_cachefile.ksh \
functional/cli_root/zpool_import/zpool_import_hostid_changed_cachefile_unclean_export.ksh \
functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_neg.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_pos.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_suspend_resume.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_uninit.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_unsupported_vdevs.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_valid.ksh \
functional/cli_root/zpool_offline/cleanup.ksh \
functional/cli_root/zpool_offline/setup.ksh \
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
functional/cli_root/zpool_online/cleanup.ksh \
functional/cli_root/zpool_online/setup.ksh \
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
functional/cli_root/zpool_online/zpool_online_002_neg.ksh \
functional/cli_root/zpool_remove/cleanup.ksh \
functional/cli_root/zpool_remove/setup.ksh \
functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh \
functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh \
functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh \
functional/cli_root/zpool_reopen/cleanup.ksh \
functional/cli_root/zpool_reopen/setup.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh \
functional/cli_root/zpool_replace/cleanup.ksh \
functional/cli_root/zpool_replace/replace-o_ashift.ksh \
functional/cli_root/zpool_replace/replace_prop_ashift.ksh \
functional/cli_root/zpool_replace/setup.ksh \
functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh \
functional/cli_root/zpool_resilver/cleanup.ksh \
functional/cli_root/zpool_resilver/setup.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh \
functional/cli_root/zpool_scrub/cleanup.ksh \
functional/cli_root/zpool_scrub/setup.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_004_pos.ksh \
functional/cli_root/zpool_set/cleanup.ksh \
functional/cli_root/zpool_set/setup.ksh \
functional/cli_root/zpool/setup.ksh \
functional/cli_root/zpool_set/vdev_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_common.kshlib \
functional/cli_root/zpool_set/zpool_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_003_neg.ksh \
functional/cli_root/zpool_set/zpool_set_ashift.ksh \
functional/cli_root/zpool_set/user_property_001_pos.ksh \
functional/cli_root/zpool_set/user_property_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_features.ksh \
functional/cli_root/zpool_split/cleanup.ksh \
functional/cli_root/zpool_split/setup.ksh \
functional/cli_root/zpool_split/zpool_split_cliargs.ksh \
functional/cli_root/zpool_split/zpool_split_devices.ksh \
functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh \
functional/cli_root/zpool_split/zpool_split_encryption.ksh \
functional/cli_root/zpool_split/zpool_split_indirect.ksh \
functional/cli_root/zpool_split/zpool_split_props.ksh \
functional/cli_root/zpool_split/zpool_split_resilver.ksh \
functional/cli_root/zpool_split/zpool_split_vdevs.ksh \
functional/cli_root/zpool_split/zpool_split_wholedisk.ksh \
functional/cli_root/zpool_status/cleanup.ksh \
functional/cli_root/zpool_status/setup.ksh \
functional/cli_root/zpool_status/zpool_status_001_pos.ksh \
functional/cli_root/zpool_status/zpool_status_002_pos.ksh \
functional/cli_root/zpool_status/zpool_status_003_pos.ksh \
functional/cli_root/zpool_status/zpool_status_004_pos.ksh \
functional/cli_root/zpool_status/zpool_status_005_pos.ksh \
functional/cli_root/zpool_status/zpool_status_006_pos.ksh \
functional/cli_root/zpool_status/zpool_status_007_pos.ksh \
+ functional/cli_root/zpool_status/zpool_status_008_pos.ksh \
functional/cli_root/zpool_status/zpool_status_features_001_pos.ksh \
functional/cli_root/zpool_sync/cleanup.ksh \
functional/cli_root/zpool_sync/setup.ksh \
functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh \
functional/cli_root/zpool_sync/zpool_sync_002_neg.ksh \
functional/cli_root/zpool_trim/cleanup.ksh \
functional/cli_root/zpool_trim/setup.ksh \
functional/cli_root/zpool_trim/zpool_trim_attach_detach_add_remove.ksh \
functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \
functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \
functional/cli_root/zpool_trim/zpool_trim_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \
functional/cli_root/zpool_trim/zpool_trim_partial.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_secure.ksh \
functional/cli_root/zpool_trim/zpool_trim_split.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_pos.ksh \
functional/cli_root/zpool_trim/zpool_trim_suspend_resume.ksh \
functional/cli_root/zpool_trim/zpool_trim_unsupported_vdevs.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh \
functional/cli_root/zpool_upgrade/cleanup.ksh \
functional/cli_root/zpool_upgrade/setup.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_features_001_pos.ksh \
functional/cli_root/zpool_wait/cleanup.ksh \
functional/cli_root/zpool_wait/scan/cleanup.ksh \
functional/cli_root/zpool_wait/scan/setup.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_rebuild.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_resilver.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_basic.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh \
functional/cli_root/zpool_wait/setup.ksh \
functional/cli_root/zpool_wait/zpool_wait_discard.ksh \
functional/cli_root/zpool_wait/zpool_wait_freeing.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_multiple.ksh \
functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_usage.ksh \
functional/cli_root/zpool/zpool_001_neg.ksh \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
functional/cli_user/misc/arcstat_001_pos.ksh \
functional/cli_user/misc/arc_summary_001_pos.ksh \
functional/cli_user/misc/arc_summary_002_neg.ksh \
functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
functional/cli_user/misc/zdb_001_neg.ksh \
functional/cli_user/misc/zfs_001_neg.ksh \
functional/cli_user/misc/zfs_allow_001_neg.ksh \
functional/cli_user/misc/zfs_clone_001_neg.ksh \
functional/cli_user/misc/zfs_create_001_neg.ksh \
functional/cli_user/misc/zfs_destroy_001_neg.ksh \
functional/cli_user/misc/zfs_get_001_neg.ksh \
functional/cli_user/misc/zfs_inherit_001_neg.ksh \
functional/cli_user/misc/zfs_mount_001_neg.ksh \
functional/cli_user/misc/zfs_promote_001_neg.ksh \
functional/cli_user/misc/zfs_receive_001_neg.ksh \
functional/cli_user/misc/zfs_rename_001_neg.ksh \
functional/cli_user/misc/zfs_rollback_001_neg.ksh \
functional/cli_user/misc/zfs_send_001_neg.ksh \
functional/cli_user/misc/zfs_set_001_neg.ksh \
functional/cli_user/misc/zfs_share_001_neg.ksh \
functional/cli_user/misc/zfs_snapshot_001_neg.ksh \
functional/cli_user/misc/zfs_unallow_001_neg.ksh \
functional/cli_user/misc/zfs_unmount_001_neg.ksh \
functional/cli_user/misc/zfs_unshare_001_neg.ksh \
functional/cli_user/misc/zfs_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_001_neg.ksh \
functional/cli_user/misc/zpool_add_001_neg.ksh \
functional/cli_user/misc/zpool_attach_001_neg.ksh \
functional/cli_user/misc/zpool_clear_001_neg.ksh \
functional/cli_user/misc/zpool_create_001_neg.ksh \
functional/cli_user/misc/zpool_destroy_001_neg.ksh \
functional/cli_user/misc/zpool_detach_001_neg.ksh \
functional/cli_user/misc/zpool_export_001_neg.ksh \
functional/cli_user/misc/zpool_get_001_neg.ksh \
functional/cli_user/misc/zpool_history_001_neg.ksh \
functional/cli_user/misc/zpool_import_001_neg.ksh \
functional/cli_user/misc/zpool_import_002_neg.ksh \
functional/cli_user/misc/zpool_offline_001_neg.ksh \
functional/cli_user/misc/zpool_online_001_neg.ksh \
functional/cli_user/misc/zpool_remove_001_neg.ksh \
functional/cli_user/misc/zpool_replace_001_neg.ksh \
functional/cli_user/misc/zpool_scrub_001_neg.ksh \
functional/cli_user/misc/zpool_set_001_neg.ksh \
functional/cli_user/misc/zpool_status_001_neg.ksh \
functional/cli_user/misc/zpool_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_wait_privilege.ksh \
functional/cli_user/zfs_list/cleanup.ksh \
functional/cli_user/zfs_list/setup.ksh \
functional/cli_user/zfs_list/zfs_list_001_pos.ksh \
functional/cli_user/zfs_list/zfs_list_002_pos.ksh \
functional/cli_user/zfs_list/zfs_list_003_pos.ksh \
functional/cli_user/zfs_list/zfs_list_004_neg.ksh \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_004_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_disable.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh \
functional/cli_user/zpool_list/cleanup.ksh \
functional/cli_user/zpool_list/setup.ksh \
functional/cli_user/zpool_list/zpool_list_001_pos.ksh \
functional/cli_user/zpool_list/zpool_list_002_neg.ksh \
functional/cli_user/zpool_status/cleanup.ksh \
functional/cli_user/zpool_status/setup.ksh \
functional/cli_user/zpool_status/zpool_status_003_pos.ksh \
functional/cli_user/zpool_status/zpool_status_-c_disable.ksh \
functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh \
functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh \
functional/compression/cleanup.ksh \
functional/compression/compress_001_pos.ksh \
functional/compression/compress_002_pos.ksh \
functional/compression/compress_003_pos.ksh \
functional/compression/compress_004_pos.ksh \
functional/compression/compress_zstd_bswap.ksh \
functional/compression/l2arc_compressed_arc_disabled.ksh \
functional/compression/l2arc_compressed_arc.ksh \
functional/compression/l2arc_encrypted.ksh \
functional/compression/l2arc_encrypted_no_compressed_arc.ksh \
functional/compression/setup.ksh \
functional/cp_files/cleanup.ksh \
functional/cp_files/cp_files_001_pos.ksh \
+ functional/cp_files/cp_files_002_pos.ksh \
+ functional/cp_files/cp_stress.ksh \
functional/cp_files/setup.ksh \
functional/crtime/cleanup.ksh \
functional/crtime/crtime_001_pos.ksh \
functional/crtime/setup.ksh \
functional/ctime/cleanup.ksh \
functional/ctime/ctime_001_pos.ksh \
functional/ctime/setup.ksh \
functional/deadman/deadman_ratelimit.ksh \
functional/deadman/deadman_sync.ksh \
functional/deadman/deadman_zio.ksh \
functional/delegate/cleanup.ksh \
functional/delegate/setup.ksh \
functional/delegate/zfs_allow_001_pos.ksh \
functional/delegate/zfs_allow_002_pos.ksh \
functional/delegate/zfs_allow_003_pos.ksh \
functional/delegate/zfs_allow_004_pos.ksh \
functional/delegate/zfs_allow_005_pos.ksh \
functional/delegate/zfs_allow_006_pos.ksh \
functional/delegate/zfs_allow_007_pos.ksh \
functional/delegate/zfs_allow_008_pos.ksh \
functional/delegate/zfs_allow_009_neg.ksh \
functional/delegate/zfs_allow_010_pos.ksh \
functional/delegate/zfs_allow_011_neg.ksh \
functional/delegate/zfs_allow_012_neg.ksh \
functional/delegate/zfs_unallow_001_pos.ksh \
functional/delegate/zfs_unallow_002_pos.ksh \
functional/delegate/zfs_unallow_003_pos.ksh \
functional/delegate/zfs_unallow_004_pos.ksh \
functional/delegate/zfs_unallow_005_pos.ksh \
functional/delegate/zfs_unallow_006_pos.ksh \
functional/delegate/zfs_unallow_007_neg.ksh \
functional/delegate/zfs_unallow_008_neg.ksh \
functional/devices/cleanup.ksh \
functional/devices/devices_001_pos.ksh \
functional/devices/devices_002_neg.ksh \
functional/devices/devices_003_pos.ksh \
functional/devices/setup.ksh \
functional/dos_attributes/cleanup.ksh \
functional/dos_attributes/read_dos_attrs_001.ksh \
functional/dos_attributes/setup.ksh \
functional/dos_attributes/write_dos_attrs_001.ksh \
functional/events/cleanup.ksh \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
functional/events/zed_cksum_config.ksh \
functional/events/zed_cksum_reported.ksh \
functional/events/zed_fd_spill.ksh \
functional/events/zed_io_config.ksh \
functional/events/zed_rc_filter.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
functional/exec/setup.ksh \
functional/fadvise/cleanup.ksh \
functional/fadvise/fadvise_sequential.ksh \
functional/fadvise/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
functional/fallocate/fallocate_punch-hole.ksh \
functional/fallocate/fallocate_zero-range.ksh \
functional/fallocate/setup.ksh \
functional/fault/auto_offline_001_pos.ksh \
functional/fault/auto_online_001_pos.ksh \
functional/fault/auto_online_002_pos.ksh \
functional/fault/auto_replace_001_pos.ksh \
functional/fault/auto_replace_002_pos.ksh \
functional/fault/auto_spare_001_pos.ksh \
functional/fault/auto_spare_002_pos.ksh \
functional/fault/auto_spare_ashift.ksh \
functional/fault/auto_spare_multiple.ksh \
functional/fault/auto_spare_shared.ksh \
functional/fault/cleanup.ksh \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \
functional/features/async_destroy/cleanup.ksh \
functional/features/async_destroy/setup.ksh \
functional/features/large_dnode/cleanup.ksh \
functional/features/large_dnode/large_dnode_001_pos.ksh \
functional/features/large_dnode/large_dnode_002_pos.ksh \
functional/features/large_dnode/large_dnode_003_pos.ksh \
functional/features/large_dnode/large_dnode_004_neg.ksh \
functional/features/large_dnode/large_dnode_005_pos.ksh \
functional/features/large_dnode/large_dnode_006_pos.ksh \
functional/features/large_dnode/large_dnode_007_neg.ksh \
functional/features/large_dnode/large_dnode_008_pos.ksh \
functional/features/large_dnode/large_dnode_009_pos.ksh \
functional/features/large_dnode/setup.ksh \
functional/grow/grow_pool_001_pos.ksh \
functional/grow/grow_replicas_001_pos.ksh \
functional/history/cleanup.ksh \
functional/history/history_001_pos.ksh \
functional/history/history_002_pos.ksh \
functional/history/history_003_pos.ksh \
functional/history/history_004_pos.ksh \
functional/history/history_005_neg.ksh \
functional/history/history_006_neg.ksh \
functional/history/history_007_pos.ksh \
functional/history/history_008_pos.ksh \
functional/history/history_009_pos.ksh \
functional/history/history_010_pos.ksh \
functional/history/setup.ksh \
functional/inheritance/cleanup.ksh \
functional/inheritance/inherit_001_pos.ksh \
functional/inuse/inuse_001_pos.ksh \
functional/inuse/inuse_003_pos.ksh \
functional/inuse/inuse_004_pos.ksh \
functional/inuse/inuse_005_pos.ksh \
functional/inuse/inuse_006_pos.ksh \
functional/inuse/inuse_007_pos.ksh \
functional/inuse/inuse_008_pos.ksh \
functional/inuse/inuse_009_pos.ksh \
functional/inuse/setup.ksh \
functional/io/cleanup.ksh \
functional/io/io_uring.ksh \
functional/io/libaio.ksh \
functional/io/mmap.ksh \
functional/io/posixaio.ksh \
functional/io/psync.ksh \
functional/io/setup.ksh \
functional/io/sync.ksh \
functional/l2arc/cleanup.ksh \
functional/l2arc/l2arc_arcstats_pos.ksh \
functional/l2arc/l2arc_l2miss_pos.ksh \
functional/l2arc/l2arc_mfuonly_pos.ksh \
functional/l2arc/persist_l2arc_001_pos.ksh \
functional/l2arc/persist_l2arc_002_pos.ksh \
functional/l2arc/persist_l2arc_003_neg.ksh \
functional/l2arc/persist_l2arc_004_pos.ksh \
functional/l2arc/persist_l2arc_005_pos.ksh \
functional/l2arc/setup.ksh \
functional/large_files/cleanup.ksh \
functional/large_files/large_files_001_pos.ksh \
functional/large_files/large_files_002_pos.ksh \
functional/large_files/setup.ksh \
functional/largest_pool/largest_pool_001_pos.ksh \
functional/libzfs/cleanup.ksh \
functional/libzfs/libzfs_input.ksh \
functional/libzfs/setup.ksh \
functional/limits/cleanup.ksh \
functional/limits/filesystem_count.ksh \
functional/limits/filesystem_limit.ksh \
functional/limits/setup.ksh \
functional/limits/snapshot_count.ksh \
functional/limits/snapshot_limit.ksh \
functional/link_count/cleanup.ksh \
functional/link_count/link_count_001.ksh \
functional/link_count/link_count_root_inode.ksh \
functional/link_count/setup.ksh \
functional/log_spacemap/log_spacemap_import_logs.ksh \
functional/migration/cleanup.ksh \
functional/migration/migration_001_pos.ksh \
functional/migration/migration_002_pos.ksh \
functional/migration/migration_003_pos.ksh \
functional/migration/migration_004_pos.ksh \
functional/migration/migration_005_pos.ksh \
functional/migration/migration_006_pos.ksh \
functional/migration/migration_007_pos.ksh \
functional/migration/migration_008_pos.ksh \
functional/migration/migration_009_pos.ksh \
functional/migration/migration_010_pos.ksh \
functional/migration/migration_011_pos.ksh \
functional/migration/migration_012_pos.ksh \
functional/migration/setup.ksh \
functional/mmap/cleanup.ksh \
functional/mmap/mmap_libaio_001_pos.ksh \
functional/mmap/mmap_mixed.ksh \
functional/mmap/mmap_read_001_pos.ksh \
functional/mmap/mmap_seek_001_pos.ksh \
functional/mmap/mmap_sync_001_pos.ksh \
functional/mmap/mmap_write_001_pos.ksh \
functional/mmap/setup.ksh \
functional/mmp/cleanup.ksh \
functional/mmp/mmp_active_import.ksh \
functional/mmp/mmp_exported_import.ksh \
functional/mmp/mmp_hostid.ksh \
functional/mmp/mmp_inactive_import.ksh \
functional/mmp/mmp_interval.ksh \
functional/mmp/mmp_on_off.ksh \
functional/mmp/mmp_on_thread.ksh \
functional/mmp/mmp_on_uberblocks.ksh \
functional/mmp/mmp_on_zdb.ksh \
functional/mmp/mmp_reset_interval.ksh \
functional/mmp/mmp_write_distribution.ksh \
functional/mmp/mmp_write_uberblocks.ksh \
functional/mmp/multihost_history.ksh \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
functional/mv_files/cleanup.ksh \
functional/mv_files/mv_files_001_pos.ksh \
functional/mv_files/mv_files_002_pos.ksh \
functional/mv_files/random_creation.ksh \
functional/mv_files/setup.ksh \
functional/nestedfs/cleanup.ksh \
functional/nestedfs/nestedfs_001_pos.ksh \
functional/nestedfs/setup.ksh \
functional/nopwrite/cleanup.ksh \
functional/nopwrite/nopwrite_copies.ksh \
functional/nopwrite/nopwrite_mtime.ksh \
functional/nopwrite/nopwrite_negative.ksh \
functional/nopwrite/nopwrite_promoted_clone.ksh \
functional/nopwrite/nopwrite_recsize.ksh \
functional/nopwrite/nopwrite_sync.ksh \
functional/nopwrite/nopwrite_varying_compression.ksh \
functional/nopwrite/nopwrite_volume.ksh \
functional/nopwrite/setup.ksh \
functional/no_space/cleanup.ksh \
functional/no_space/enospc_001_pos.ksh \
functional/no_space/enospc_002_pos.ksh \
functional/no_space/enospc_003_pos.ksh \
functional/no_space/enospc_df.ksh \
functional/no_space/enospc_ganging.ksh \
functional/no_space/enospc_rm.ksh \
functional/no_space/setup.ksh \
functional/online_offline/cleanup.ksh \
functional/online_offline/online_offline_001_pos.ksh \
functional/online_offline/online_offline_002_neg.ksh \
functional/online_offline/online_offline_003_neg.ksh \
functional/online_offline/setup.ksh \
functional/pam/cleanup.ksh \
functional/pam/pam_basic.ksh \
functional/pam/pam_change_unmounted.ksh \
functional/pam/pam_nounmount.ksh \
functional/pam/pam_recursive.ksh \
functional/pam/pam_short_password.ksh \
functional/pam/setup.ksh \
functional/pool_checkpoint/checkpoint_after_rewind.ksh \
functional/pool_checkpoint/checkpoint_big_rewind.ksh \
functional/pool_checkpoint/checkpoint_capacity.ksh \
functional/pool_checkpoint/checkpoint_conf_change.ksh \
functional/pool_checkpoint/checkpoint_discard_busy.ksh \
functional/pool_checkpoint/checkpoint_discard.ksh \
functional/pool_checkpoint/checkpoint_discard_many.ksh \
functional/pool_checkpoint/checkpoint_indirect.ksh \
functional/pool_checkpoint/checkpoint_invalid.ksh \
functional/pool_checkpoint/checkpoint_lun_expsz.ksh \
functional/pool_checkpoint/checkpoint_open.ksh \
functional/pool_checkpoint/checkpoint_removal.ksh \
functional/pool_checkpoint/checkpoint_rewind.ksh \
functional/pool_checkpoint/checkpoint_ro_rewind.ksh \
functional/pool_checkpoint/checkpoint_sm_scale.ksh \
functional/pool_checkpoint/checkpoint_twice.ksh \
functional/pool_checkpoint/checkpoint_vdev_add.ksh \
functional/pool_checkpoint/checkpoint_zdb.ksh \
functional/pool_checkpoint/checkpoint_zhack_feat.ksh \
functional/pool_checkpoint/cleanup.ksh \
functional/pool_checkpoint/setup.ksh \
functional/pool_names/pool_names_001_pos.ksh \
functional/pool_names/pool_names_002_neg.ksh \
functional/poolversion/cleanup.ksh \
functional/poolversion/poolversion_001_pos.ksh \
functional/poolversion/poolversion_002_pos.ksh \
functional/poolversion/setup.ksh \
functional/privilege/cleanup.ksh \
functional/privilege/privilege_001_pos.ksh \
functional/privilege/privilege_002_pos.ksh \
functional/privilege/setup.ksh \
functional/procfs/cleanup.ksh \
functional/procfs/pool_state.ksh \
functional/procfs/procfs_list_basic.ksh \
functional/procfs/procfs_list_concurrent_readers.ksh \
functional/procfs/procfs_list_stale_read.ksh \
functional/procfs/setup.ksh \
functional/projectquota/cleanup.ksh \
functional/projectquota/projectid_001_pos.ksh \
functional/projectquota/projectid_002_pos.ksh \
functional/projectquota/projectid_003_pos.ksh \
functional/projectquota/projectquota_001_pos.ksh \
functional/projectquota/projectquota_002_pos.ksh \
functional/projectquota/projectquota_003_pos.ksh \
functional/projectquota/projectquota_004_neg.ksh \
functional/projectquota/projectquota_005_pos.ksh \
functional/projectquota/projectquota_006_pos.ksh \
functional/projectquota/projectquota_007_pos.ksh \
functional/projectquota/projectquota_008_pos.ksh \
functional/projectquota/projectquota_009_pos.ksh \
functional/projectquota/projectspace_001_pos.ksh \
functional/projectquota/projectspace_002_pos.ksh \
functional/projectquota/projectspace_003_pos.ksh \
functional/projectquota/projectspace_004_pos.ksh \
functional/projectquota/projecttree_001_pos.ksh \
functional/projectquota/projecttree_002_pos.ksh \
functional/projectquota/projecttree_003_neg.ksh \
functional/projectquota/setup.ksh \
functional/quota/cleanup.ksh \
functional/quota/quota_001_pos.ksh \
functional/quota/quota_002_pos.ksh \
functional/quota/quota_003_pos.ksh \
functional/quota/quota_004_pos.ksh \
functional/quota/quota_005_pos.ksh \
functional/quota/quota_006_neg.ksh \
functional/quota/setup.ksh \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_003_pos.ksh \
functional/raidz/raidz_004_pos.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \
functional/redacted_send/redacted_contents.ksh \
functional/redacted_send/redacted_deleted.ksh \
functional/redacted_send/redacted_disabled_feature.ksh \
functional/redacted_send/redacted_embedded.ksh \
functional/redacted_send/redacted_holes.ksh \
functional/redacted_send/redacted_incrementals.ksh \
functional/redacted_send/redacted_largeblocks.ksh \
functional/redacted_send/redacted_many_clones.ksh \
functional/redacted_send/redacted_mixed_recsize.ksh \
functional/redacted_send/redacted_mounts.ksh \
functional/redacted_send/redacted_negative.ksh \
functional/redacted_send/redacted_origin.ksh \
functional/redacted_send/redacted_panic.ksh \
functional/redacted_send/redacted_props.ksh \
functional/redacted_send/redacted_resume.ksh \
functional/redacted_send/redacted_size.ksh \
functional/redacted_send/redacted_volume.ksh \
functional/redacted_send/setup.ksh \
functional/redundancy/cleanup.ksh \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
functional/redundancy/redundancy_draid_damaged1.ksh \
functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
functional/redundancy/redundancy_draid_spare3.ksh \
functional/redundancy/redundancy_mirror.ksh \
functional/redundancy/redundancy_raidz1.ksh \
functional/redundancy/redundancy_raidz2.ksh \
functional/redundancy/redundancy_raidz3.ksh \
functional/redundancy/redundancy_raidz.ksh \
functional/redundancy/redundancy_stripe.ksh \
functional/redundancy/setup.ksh \
functional/refquota/cleanup.ksh \
functional/refquota/refquota_001_pos.ksh \
functional/refquota/refquota_002_pos.ksh \
functional/refquota/refquota_003_pos.ksh \
functional/refquota/refquota_004_pos.ksh \
functional/refquota/refquota_005_pos.ksh \
functional/refquota/refquota_006_neg.ksh \
functional/refquota/refquota_007_neg.ksh \
functional/refquota/refquota_008_neg.ksh \
functional/refquota/setup.ksh \
functional/refreserv/cleanup.ksh \
functional/refreserv/refreserv_001_pos.ksh \
functional/refreserv/refreserv_002_pos.ksh \
functional/refreserv/refreserv_003_pos.ksh \
functional/refreserv/refreserv_004_pos.ksh \
functional/refreserv/refreserv_005_pos.ksh \
functional/refreserv/refreserv_multi_raidz.ksh \
functional/refreserv/refreserv_raidz.ksh \
functional/refreserv/setup.ksh \
functional/removal/cleanup.ksh \
functional/removal/removal_all_vdev.ksh \
functional/removal/removal_cancel.ksh \
functional/removal/removal_check_space.ksh \
functional/removal/removal_condense_export.ksh \
functional/removal/removal_multiple_indirection.ksh \
functional/removal/removal_nopwrite.ksh \
functional/removal/removal_remap_deadlists.ksh \
functional/removal/removal_reservation.ksh \
functional/removal/removal_resume_export.ksh \
functional/removal/removal_sanity.ksh \
functional/removal/removal_with_add.ksh \
functional/removal/removal_with_create_fs.ksh \
functional/removal/removal_with_dedup.ksh \
functional/removal/removal_with_errors.ksh \
functional/removal/removal_with_export.ksh \
functional/removal/removal_with_faulted.ksh \
functional/removal/removal_with_ganging.ksh \
functional/removal/removal_with_indirect.ksh \
functional/removal/removal_with_remove.ksh \
functional/removal/removal_with_scrub.ksh \
functional/removal/removal_with_send.ksh \
functional/removal/removal_with_send_recv.ksh \
functional/removal/removal_with_snapshot.ksh \
functional/removal/removal_with_write.ksh \
functional/removal/removal_with_zdb.ksh \
functional/removal/remove_attach_mirror.ksh \
functional/removal/remove_expanded.ksh \
functional/removal/remove_indirect.ksh \
functional/removal/remove_mirror.ksh \
functional/removal/remove_mirror_sanity.ksh \
functional/removal/remove_raidz.ksh \
functional/rename_dirs/cleanup.ksh \
functional/rename_dirs/rename_dirs_001_pos.ksh \
functional/rename_dirs/setup.ksh \
functional/renameat2/cleanup.ksh \
functional/renameat2/setup.ksh \
functional/renameat2/renameat2_exchange.ksh \
functional/renameat2/renameat2_noreplace.ksh \
functional/renameat2/renameat2_whiteout.ksh \
functional/replacement/attach_import.ksh \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
functional/replacement/rebuild_multiple.ksh \
functional/replacement/rebuild_raidz.ksh \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
functional/replacement/setup.ksh \
functional/reservation/cleanup.ksh \
functional/reservation/reservation_001_pos.ksh \
functional/reservation/reservation_002_pos.ksh \
functional/reservation/reservation_003_pos.ksh \
functional/reservation/reservation_004_pos.ksh \
functional/reservation/reservation_005_pos.ksh \
functional/reservation/reservation_006_pos.ksh \
functional/reservation/reservation_007_pos.ksh \
functional/reservation/reservation_008_pos.ksh \
functional/reservation/reservation_009_pos.ksh \
functional/reservation/reservation_010_pos.ksh \
functional/reservation/reservation_011_pos.ksh \
functional/reservation/reservation_012_pos.ksh \
functional/reservation/reservation_013_pos.ksh \
functional/reservation/reservation_014_pos.ksh \
functional/reservation/reservation_015_pos.ksh \
functional/reservation/reservation_016_pos.ksh \
functional/reservation/reservation_017_pos.ksh \
functional/reservation/reservation_018_pos.ksh \
functional/reservation/reservation_019_pos.ksh \
functional/reservation/reservation_020_pos.ksh \
functional/reservation/reservation_021_neg.ksh \
functional/reservation/reservation_022_pos.ksh \
functional/reservation/setup.ksh \
functional/rootpool/cleanup.ksh \
functional/rootpool/rootpool_002_neg.ksh \
functional/rootpool/rootpool_003_neg.ksh \
functional/rootpool/rootpool_007_pos.ksh \
functional/rootpool/setup.ksh \
functional/rsend/cleanup.ksh \
functional/rsend/recv_dedup_encrypted_zvol.ksh \
functional/rsend/recv_dedup.ksh \
functional/rsend/rsend_001_pos.ksh \
functional/rsend/rsend_002_pos.ksh \
functional/rsend/rsend_003_pos.ksh \
functional/rsend/rsend_004_pos.ksh \
functional/rsend/rsend_005_pos.ksh \
functional/rsend/rsend_006_pos.ksh \
functional/rsend/rsend_007_pos.ksh \
functional/rsend/rsend_008_pos.ksh \
functional/rsend/rsend_009_pos.ksh \
functional/rsend/rsend_010_pos.ksh \
functional/rsend/rsend_011_pos.ksh \
functional/rsend/rsend_012_pos.ksh \
functional/rsend/rsend_013_pos.ksh \
functional/rsend/rsend_014_pos.ksh \
functional/rsend/rsend_016_neg.ksh \
functional/rsend/rsend_019_pos.ksh \
functional/rsend/rsend_020_pos.ksh \
functional/rsend/rsend_021_pos.ksh \
functional/rsend/rsend_022_pos.ksh \
functional/rsend/rsend_024_pos.ksh \
functional/rsend/rsend_025_pos.ksh \
functional/rsend/rsend_026_neg.ksh \
functional/rsend/rsend_027_pos.ksh \
functional/rsend/rsend_028_neg.ksh \
functional/rsend/rsend_029_neg.ksh \
functional/rsend/rsend_030_pos.ksh \
functional/rsend/rsend_031_pos.ksh \
functional/rsend/send-c_embedded_blocks.ksh \
functional/rsend/send-c_incremental.ksh \
functional/rsend/send-c_lz4_disabled.ksh \
functional/rsend/send-c_mixed_compression.ksh \
functional/rsend/send-c_props.ksh \
functional/rsend/send-c_recv_dedup.ksh \
functional/rsend/send-c_recv_lz4_disabled.ksh \
functional/rsend/send-c_resume.ksh \
functional/rsend/send-c_stream_size_estimate.ksh \
functional/rsend/send-c_verify_contents.ksh \
functional/rsend/send-c_verify_ratio.ksh \
functional/rsend/send-c_volume.ksh \
functional/rsend/send-c_zstream_recompress.ksh \
functional/rsend/send-c_zstreamdump.ksh \
functional/rsend/send-cpL_varied_recsize.ksh \
functional/rsend/send_doall.ksh \
functional/rsend/send_encrypted_incremental.ksh \
functional/rsend/send_encrypted_files.ksh \
functional/rsend/send_encrypted_freeobjects.ksh \
functional/rsend/send_encrypted_hierarchy.ksh \
functional/rsend/send_encrypted_props.ksh \
functional/rsend/send_encrypted_truncated_files.ksh \
functional/rsend/send_freeobjects.ksh \
functional/rsend/send_holds.ksh \
functional/rsend/send_hole_birth.ksh \
functional/rsend/send_invalid.ksh \
functional/rsend/send-L_toggle.ksh \
functional/rsend/send_mixed_raw.ksh \
functional/rsend/send_partial_dataset.ksh \
functional/rsend/send_raw_ashift.ksh \
functional/rsend/send_raw_spill_block.ksh \
functional/rsend/send_raw_large_blocks.ksh \
functional/rsend/send_realloc_dnode_size.ksh \
functional/rsend/send_realloc_encrypted_files.ksh \
functional/rsend/send_realloc_files.ksh \
functional/rsend/send_spill_block.ksh \
functional/rsend/send-wR_encrypted_zvol.ksh \
functional/rsend/setup.ksh \
functional/scrub_mirror/cleanup.ksh \
functional/scrub_mirror/scrub_mirror_001_pos.ksh \
functional/scrub_mirror/scrub_mirror_002_pos.ksh \
functional/scrub_mirror/scrub_mirror_003_pos.ksh \
functional/scrub_mirror/scrub_mirror_004_pos.ksh \
functional/scrub_mirror/setup.ksh \
functional/slog/cleanup.ksh \
functional/slog/setup.ksh \
functional/slog/slog_001_pos.ksh \
functional/slog/slog_002_pos.ksh \
functional/slog/slog_003_pos.ksh \
functional/slog/slog_004_pos.ksh \
functional/slog/slog_005_pos.ksh \
functional/slog/slog_006_pos.ksh \
functional/slog/slog_007_pos.ksh \
functional/slog/slog_008_neg.ksh \
functional/slog/slog_009_neg.ksh \
functional/slog/slog_010_neg.ksh \
functional/slog/slog_011_neg.ksh \
functional/slog/slog_012_neg.ksh \
functional/slog/slog_013_pos.ksh \
functional/slog/slog_014_pos.ksh \
functional/slog/slog_015_neg.ksh \
functional/slog/slog_016_pos.ksh \
functional/slog/slog_replay_fs_001.ksh \
functional/slog/slog_replay_fs_002.ksh \
functional/slog/slog_replay_volume.ksh \
functional/snapshot/cleanup.ksh \
functional/snapshot/clone_001_pos.ksh \
functional/snapshot/rollback_001_pos.ksh \
functional/snapshot/rollback_002_pos.ksh \
functional/snapshot/rollback_003_pos.ksh \
functional/snapshot/setup.ksh \
functional/snapshot/snapshot_001_pos.ksh \
functional/snapshot/snapshot_002_pos.ksh \
functional/snapshot/snapshot_003_pos.ksh \
functional/snapshot/snapshot_004_pos.ksh \
functional/snapshot/snapshot_005_pos.ksh \
functional/snapshot/snapshot_006_pos.ksh \
functional/snapshot/snapshot_007_pos.ksh \
functional/snapshot/snapshot_008_pos.ksh \
functional/snapshot/snapshot_009_pos.ksh \
functional/snapshot/snapshot_010_pos.ksh \
functional/snapshot/snapshot_011_pos.ksh \
functional/snapshot/snapshot_012_pos.ksh \
functional/snapshot/snapshot_013_pos.ksh \
functional/snapshot/snapshot_014_pos.ksh \
functional/snapshot/snapshot_015_pos.ksh \
functional/snapshot/snapshot_016_pos.ksh \
functional/snapshot/snapshot_017_pos.ksh \
functional/snapshot/snapshot_018_pos.ksh \
functional/snapused/cleanup.ksh \
functional/snapused/setup.ksh \
functional/snapused/snapused_001_pos.ksh \
functional/snapused/snapused_002_pos.ksh \
functional/snapused/snapused_003_pos.ksh \
functional/snapused/snapused_004_pos.ksh \
functional/snapused/snapused_005_pos.ksh \
functional/sparse/cleanup.ksh \
functional/sparse/setup.ksh \
functional/sparse/sparse_001_pos.ksh \
functional/stat/cleanup.ksh \
functional/stat/setup.ksh \
functional/stat/stat_001_pos.ksh \
functional/suid/cleanup.ksh \
functional/suid/setup.ksh \
functional/suid/suid_write_to_none.ksh \
functional/suid/suid_write_to_sgid.ksh \
functional/suid/suid_write_to_suid.ksh \
functional/suid/suid_write_to_suid_sgid.ksh \
functional/suid/suid_write_zil_replay.ksh \
functional/trim/autotrim_config.ksh \
functional/trim/autotrim_integrity.ksh \
functional/trim/autotrim_trim_integrity.ksh \
functional/trim/cleanup.ksh \
functional/trim/setup.ksh \
functional/trim/trim_config.ksh \
functional/trim/trim_integrity.ksh \
functional/trim/trim_l2arc.ksh \
functional/truncate/cleanup.ksh \
functional/truncate/setup.ksh \
functional/truncate/truncate_001_pos.ksh \
functional/truncate/truncate_002_pos.ksh \
functional/truncate/truncate_timestamps.ksh \
functional/upgrade/cleanup.ksh \
functional/upgrade/setup.ksh \
functional/upgrade/upgrade_projectquota_001_pos.ksh \
functional/upgrade/upgrade_readonly_pool.ksh \
functional/upgrade/upgrade_userobj_001_pos.ksh \
functional/user_namespace/cleanup.ksh \
functional/user_namespace/setup.ksh \
functional/user_namespace/user_namespace_001.ksh \
functional/user_namespace/user_namespace_002.ksh \
functional/user_namespace/user_namespace_003.ksh \
functional/user_namespace/user_namespace_004.ksh \
functional/userquota/cleanup.ksh \
functional/userquota/groupspace_001_pos.ksh \
functional/userquota/groupspace_002_pos.ksh \
functional/userquota/groupspace_003_pos.ksh \
functional/userquota/setup.ksh \
functional/userquota/userquota_001_pos.ksh \
functional/userquota/userquota_002_pos.ksh \
functional/userquota/userquota_003_pos.ksh \
functional/userquota/userquota_004_pos.ksh \
functional/userquota/userquota_005_neg.ksh \
functional/userquota/userquota_006_pos.ksh \
functional/userquota/userquota_007_pos.ksh \
functional/userquota/userquota_008_pos.ksh \
functional/userquota/userquota_009_pos.ksh \
functional/userquota/userquota_010_pos.ksh \
functional/userquota/userquota_011_pos.ksh \
functional/userquota/userquota_012_neg.ksh \
functional/userquota/userquota_013_pos.ksh \
functional/userquota/userspace_001_pos.ksh \
functional/userquota/userspace_002_pos.ksh \
functional/userquota/userspace_003_pos.ksh \
functional/userquota/userspace_encrypted.ksh \
functional/userquota/userspace_send_encrypted.ksh \
functional/userquota/userspace_encrypted_13709.ksh \
functional/vdev_zaps/cleanup.ksh \
functional/vdev_zaps/setup.ksh \
functional/vdev_zaps/vdev_zaps_001_pos.ksh \
functional/vdev_zaps/vdev_zaps_002_pos.ksh \
functional/vdev_zaps/vdev_zaps_003_pos.ksh \
functional/vdev_zaps/vdev_zaps_004_pos.ksh \
functional/vdev_zaps/vdev_zaps_005_pos.ksh \
functional/vdev_zaps/vdev_zaps_006_pos.ksh \
functional/vdev_zaps/vdev_zaps_007_pos.ksh \
functional/write_dirs/cleanup.ksh \
functional/write_dirs/setup.ksh \
functional/write_dirs/write_dirs_001_pos.ksh \
functional/write_dirs/write_dirs_002_pos.ksh \
functional/xattr/cleanup.ksh \
functional/xattr/setup.ksh \
functional/xattr/xattr_001_pos.ksh \
functional/xattr/xattr_002_neg.ksh \
functional/xattr/xattr_003_neg.ksh \
functional/xattr/xattr_004_pos.ksh \
functional/xattr/xattr_005_pos.ksh \
functional/xattr/xattr_006_pos.ksh \
functional/xattr/xattr_007_neg.ksh \
functional/xattr/xattr_008_pos.ksh \
functional/xattr/xattr_009_neg.ksh \
functional/xattr/xattr_010_neg.ksh \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zpool_influxdb/cleanup.ksh \
functional/zpool_influxdb/setup.ksh \
functional/zpool_influxdb/zpool_influxdb.ksh \
functional/zvol/zvol_cli/cleanup.ksh \
functional/zvol/zvol_cli/setup.ksh \
functional/zvol/zvol_cli/zvol_cli_001_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_002_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_003_neg.ksh \
functional/zvol/zvol_ENOSPC/cleanup.ksh \
functional/zvol/zvol_ENOSPC/setup.ksh \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh \
functional/zvol/zvol_misc/cleanup.ksh \
functional/zvol/zvol_misc/setup.ksh \
functional/zvol/zvol_misc/zvol_misc_001_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_002_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_003_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_004_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_005_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_006_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_fua.ksh \
functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh \
functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh \
functional/zvol/zvol_misc/zvol_misc_snapdev.ksh \
functional/zvol/zvol_misc/zvol_misc_trim.ksh \
functional/zvol/zvol_misc/zvol_misc_volmode.ksh \
functional/zvol/zvol_misc/zvol_misc_zil.ksh \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_002_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_003_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_004_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_005_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_006_pos.ksh \
functional/idmap_mount/cleanup.ksh \
functional/idmap_mount/setup.ksh \
functional/idmap_mount/idmap_mount_001.ksh \
functional/idmap_mount/idmap_mount_002.ksh \
functional/idmap_mount/idmap_mount_003.ksh \
functional/idmap_mount/idmap_mount_004.ksh \
functional/idmap_mount/idmap_mount_005.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/TODO b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/TODO
new file mode 100644
index 000000000000..7cd4ee898fc4
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/TODO
@@ -0,0 +1,4 @@
+- If dedup is enabled, block cloning uses dedup.
+- check cases where block cloning isn't supposed to work
+- check block cloning between two different pools
+- block cloning from a snapshot
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone.cfg
old mode 100755
new mode 100644
similarity index 75%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone.cfg
index b756d4e76c83..f72d17c1beca
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone.cfg
@@ -1,35 +1,32 @@
-#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
-#
-# Copyright (c) 2013 by Delphix. All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
+# TODO: We should calculate this value based on ashift.
+export MINBLOCKSIZE=512
-DISK=${DISKS%% *}
-default_setup $DISK
+export TESTSRCFS="$TESTPOOL/$TESTFS/src"
+export TESTDSTFS="$TESTPOOL/$TESTFS/dst"
+export TESTSRCDIR="$TESTDIR/src"
+export TESTDSTDIR="$TESTDIR/dst"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib
new file mode 100644
index 000000000000..3b8eaea5bb54
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib
@@ -0,0 +1,286 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/tests/functional/bclone/bclone.cfg
+
+export RECORDSIZE=$(zfs get -Hp -o value recordsize $TESTPOOL/$TESTFS)
+
+MINBLKSIZE1=512
+MINBLKSIZE2=1024
+
+function verify_block_cloning
+{
+ if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+ fi
+}
+
+function verify_crossfs_block_cloning
+{
+ if is_linux && [[ $(linux_version) -lt $(linux_version "5.3") ]]; then
+ log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3"
+ fi
+
+ # Cross dataset block cloning only supported on FreeBSD 14+
+ # https://github.com/freebsd/freebsd-src/commit/969071be938c
+ if is_freebsd && [ $(freebsd_version) -lt $(freebsd_version 14.0) ] ; then
+ log_unsupported "Cloning across datasets not supported in $(uname -r)"
+ fi
+}
+
+# Unused.
+function size_to_dsize
+{
+ typeset -r size=$1
+ typeset -r dir=$2
+
+ typeset -r dataset=$(df $dir | tail -1 | awk '{print $1}')
+ typeset -r recordsize=$(get_prop recordsize $dataset)
+ typeset -r copies=$(get_prop copies $dataset)
+ typeset dsize
+
+ if [[ $size -le $recordsize ]]; then
+ dsize=$(( ((size - 1) / MINBLOCKSIZE + 1) * MINBLOCKSIZE ))
+ else
+ dsize=$(( ((size - 1) / recordsize + 1) * recordsize ))
+ fi
+ dsize=$((dsize*copies))
+
+ echo $dsize
+}
+
+function test_file_integrity
+{
+ typeset -r original_checksum=$1
+ typeset -r clone=$2
+ typeset -r filesize=$3
+
+ typeset -r clone_checksum=$(sha256digest $clone)
+
+ if [[ $original_checksum != $clone_checksum ]]; then
+ log_fail "Clone $clone is corrupted with file size $filesize"
+ fi
+}
+
+function verify_pool_prop_eq
+{
+ typeset -r prop=$1
+ typeset -r expected=$2
+
+ typeset -r value=$(get_pool_prop $prop $TESTPOOL)
+ if [[ $value != $expected ]]; then
+ log_fail "Pool property $prop is incorrect: expected $expected, got $value"
+ fi
+}
+
+function verify_pool_props
+{
+ typeset -r dsize=$1
+ typeset -r ratio=$2
+
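+ # dsize is the expected amount of cloned data and ratio the expected
+ # number of references to it; with a single reference nothing is
+ # shared, so bcloneused drops back to 0.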
+ if [[ $dsize -eq 0 ]]; then
+ verify_pool_prop_eq bcloneused 0
+ verify_pool_prop_eq bclonesaved 0
+ verify_pool_prop_eq bcloneratio 1.00
+ else
+ if [[ $ratio -eq 1 ]]; then
+ verify_pool_prop_eq bcloneused 0
+ else
+ verify_pool_prop_eq bcloneused $dsize
+ fi
+ verify_pool_prop_eq bclonesaved $((dsize*(ratio-1)))
+ verify_pool_prop_eq bcloneratio "${ratio}.00"
+ fi
+}
+
+# Test file cloning and verify the integrity of the resulting clones.
+function bclone_test
+{
+ typeset -r datatype=$1
+ typeset filesize=$2
+ typeset -r embedded=$3
+ typeset -r srcdir=$4
+ typeset -r dstdir=$5
+ typeset dsize
+
+ typeset -r original="${srcdir}/original"
+ typeset -r clone="${dstdir}/clone"
+
+ log_note "Testing file copy with datatype $datatype, file size $filesize, embedded $embedded"
+
+ # Create a test file with known content.
+ case $datatype in
+ random|text)
+ sync_pool $TESTPOOL
+ if [[ $datatype = "random" ]]; then
+ dd if=/dev/urandom of=$original bs=$filesize count=1 2>/dev/null
+ else
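+ # base64 expands every 3 input bytes into 4 output characters,
+ # so round the size down to a multiple of 4 and generate 3/4 of
+ # it as random input to end up with exactly $filesize bytes.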
+ filesize=$(((filesize/4)*4))
+ dd if=/dev/urandom bs=$(((filesize/4)*3)) count=1 | \
+ openssl base64 -A > $original
+ fi
+ sync_pool $TESTPOOL
+ clonefile -f $original "${clone}-tmp"
+ sync_pool $TESTPOOL
+ # It is hard to predict which block sizes will be used, so just
+ # do one clone and read the cloned size back from bcloneused.
+ filesize=$(zpool get -Hp -o value bcloneused $TESTPOOL)
+ if [[ $embedded = "false" ]]; then
+ log_must test $filesize -gt 0
+ fi
+ rm -f "${clone}-tmp"
+ sync_pool $TESTPOOL
+ dsize=$filesize
+ ;;
+ hole)
+ log_must truncate_test -s $filesize -f $original
+ dsize=0
+ ;;
+ *)
+ log_fail "Unknown datatype $datatype"
+ ;;
+ esac
+ if [[ $embedded = "true" ]]; then
+ dsize=0
+ fi
+
+ typeset -r original_checksum=$(sha256digest $original)
+
+ sync_pool $TESTPOOL
+
+ # Create a first clone of the entire file.
+ clonefile -f $original "${clone}0"
+ # Try to clone the clone in the same transaction group.
+ clonefile -f "${clone}0" "${clone}2"
+
+ # Clone the original again...
+ clonefile -f $original "${clone}1"
+ # ...and overwrite it in the same transaction group.
+ clonefile -f $original "${clone}1"
+
+ # Clone the clone...
+ clonefile -f "${clone}1" "${clone}3"
+ sync_pool $TESTPOOL
+ # ...and overwrite in the new transaction group.
+ clonefile -f "${clone}1" "${clone}3"
+
+ sync_pool $TESTPOOL
+
+ # Test removal of the pending clones (before they are committed to disk).
+ clonefile -f $original "${clone}4"
+ clonefile -f "${clone}4" "${clone}5"
+ rm -f "${clone}4" "${clone}5"
+
+ # Clone into one file and remove another file with the same data,
+ # both in the same transaction group.
+ clonefile -f $original "${clone}5"
+ sync_pool $TESTPOOL
+ clonefile -f $original "${clone}4"
+ rm -f "${clone}5"
+ test_file_integrity $original_checksum "${clone}4" $filesize
+ sync_pool $TESTPOOL
+ test_file_integrity $original_checksum "${clone}4" $filesize
+
+ clonefile -f "${clone}4" "${clone}5"
+ # Verify integrity of the cloned file before it is committed to disk.
+ test_file_integrity $original_checksum "${clone}5" $filesize
+
+ sync_pool $TESTPOOL
+
+ # Verify integrity in the new transaction group.
+ test_file_integrity $original_checksum "${clone}0" $filesize
+ test_file_integrity $original_checksum "${clone}1" $filesize
+ test_file_integrity $original_checksum "${clone}2" $filesize
+ test_file_integrity $original_checksum "${clone}3" $filesize
+ test_file_integrity $original_checksum "${clone}4" $filesize
+ test_file_integrity $original_checksum "${clone}5" $filesize
+
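+ # The original plus clones 0-5 now share a single copy of the data,
+ # giving seven references in total.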
+ verify_pool_props $dsize 7
+
+ # Clear cache and test after fresh import.
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+
+ # Cloned uncached file.
+ clonefile -f $original "${clone}6"
+ # Cloned uncached clone.
+ clonefile -f "${clone}6" "${clone}7"
+
+ # Cache the file.
+ cat $original >/dev/null
+ clonefile -f $original "${clone}8"
+ clonefile -f "${clone}8" "${clone}9"
+
+ test_file_integrity $original_checksum "${clone}6" $filesize
+ test_file_integrity $original_checksum "${clone}7" $filesize
+ test_file_integrity $original_checksum "${clone}8" $filesize
+ test_file_integrity $original_checksum "${clone}9" $filesize
+
+ sync_pool $TESTPOOL
+
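+ # Clones 6-9 add four more references, for eleven in total.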
+ verify_pool_props $dsize 11
+
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+
+ test_file_integrity $original_checksum "${clone}0" $filesize
+ test_file_integrity $original_checksum "${clone}1" $filesize
+ test_file_integrity $original_checksum "${clone}2" $filesize
+ test_file_integrity $original_checksum "${clone}3" $filesize
+ test_file_integrity $original_checksum "${clone}4" $filesize
+ test_file_integrity $original_checksum "${clone}5" $filesize
+ test_file_integrity $original_checksum "${clone}6" $filesize
+ test_file_integrity $original_checksum "${clone}7" $filesize
+ test_file_integrity $original_checksum "${clone}8" $filesize
+ test_file_integrity $original_checksum "${clone}9" $filesize
+
+ rm -f $original
+ rm -f "${clone}1" "${clone}3" "${clone}5" "${clone}7"
+
+ sync_pool $TESTPOOL
+
+ test_file_integrity $original_checksum "${clone}0" $filesize
+ test_file_integrity $original_checksum "${clone}2" $filesize
+ test_file_integrity $original_checksum "${clone}4" $filesize
+ test_file_integrity $original_checksum "${clone}6" $filesize
+ test_file_integrity $original_checksum "${clone}8" $filesize
+ test_file_integrity $original_checksum "${clone}9" $filesize
+
+ verify_pool_props $dsize 6
+
+ rm -f "${clone}0" "${clone}2" "${clone}4" "${clone}8" "${clone}9"
+
+ sync_pool $TESTPOOL
+
+ test_file_integrity $original_checksum "${clone}6" $filesize
+
+ verify_pool_props $dsize 1
+
+ rm -f "${clone}6"
+
+ sync_pool $TESTPOOL
+
+ verify_pool_props $dsize 1
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_corner_cases.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_corner_cases.kshlib
new file mode 100644
index 000000000000..ddfbfc999c4e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_corner_cases.kshlib
@@ -0,0 +1,315 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
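+# Checksum a single $HALFRECORDSIZE chunk of a file: its first half, or
+# its second half when dd skips one block.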
+function first_half_checksum
+{
+ typeset -r file=$1
+
+ dd if=$file bs=$HALFRECORDSIZE count=1 2>/dev/null | sha256digest
+}
+
+function second_half_checksum
+{
+ typeset -r file=$1
+
+ dd if=$file bs=$HALFRECORDSIZE count=1 skip=1 2>/dev/null | sha256digest
+}
+
+function bclone_corner_cases_init
+{
+ typeset -r srcdir=$1
+ typeset -r dstdir=$2
+
+ export RECORDSIZE=4096
+ export HALFRECORDSIZE=$((RECORDSIZE / 2))
+
+ export CLONE="$dstdir/clone0"
+ export ORIG0="$srcdir/orig0"
+ export ORIG1="$srcdir/orig1"
+ export ORIG2="$srcdir/orig2"
+
+ # Create source files.
+ log_must dd if=/dev/urandom of="$ORIG0" bs=$RECORDSIZE count=1
+ log_must dd if=/dev/urandom of="$ORIG1" bs=$RECORDSIZE count=1
+ log_must dd if=/dev/urandom of="$ORIG2" bs=$RECORDSIZE count=1
+
+ export FIRST_HALF_ORIG0_CHECKSUM=$(first_half_checksum $ORIG0)
+ export FIRST_HALF_ORIG1_CHECKSUM=$(first_half_checksum $ORIG1)
+ export FIRST_HALF_ORIG2_CHECKSUM=$(first_half_checksum $ORIG2)
+ export SECOND_HALF_ORIG0_CHECKSUM=$(second_half_checksum $ORIG0)
+ export SECOND_HALF_ORIG1_CHECKSUM=$(second_half_checksum $ORIG1)
+ export SECOND_HALF_ORIG2_CHECKSUM=$(second_half_checksum $ORIG2)
+ export ZEROS_CHECKSUM=$(dd if=/dev/zero bs=$HALFRECORDSIZE count=1 | sha256digest)
+ export FIRST_HALF_CHECKSUM=""
+ export SECOND_HALF_CHECKSUM=""
+}
+
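+# Read the clone once so that its blocks are cached before the test
+# steps run; "uncached" leaves it untouched.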
+function cache_clone
+{
+ typeset -r cached=$1
+
+ case "$cached" in
+ "cached")
+ dd if=$CLONE of=/dev/null bs=$RECORDSIZE 2>/dev/null
+ ;;
+ "uncached")
+ ;;
+ *)
+ log_fail "invalid cached: $cached"
+ ;;
+ esac
+}
+
+function create_existing
+{
+ typeset -r existing=$1
+
+ case "$existing" in
+ "no")
+ ;;
+ "small empty")
+ log_must truncate_test -s $HALFRECORDSIZE -f $CLONE
+ ;;
+ "full empty")
+ log_must truncate_test -s $RECORDSIZE -f $CLONE
+ ;;
+ "small data")
+ log_must dd if=/dev/urandom of=$CLONE bs=$HALFRECORDSIZE count=1 \
+ 2>/dev/null
+ ;;
+ "full data")
+ log_must dd if=/dev/urandom of=$CLONE bs=$RECORDSIZE count=1 2>/dev/null
+ ;;
+ *)
+ log_fail "invalid existing: $existing"
+ ;;
+ esac
+}
+
+function create_clone
+{
+ typeset -r clone=$1
+ typeset -r file=$2
+
+ case "$clone" in
+ "no")
+ ;;
+ "yes")
+ clonefile -f $file $CLONE
+ case "$file" in
+ $ORIG0)
+ FIRST_HALF_CHECKSUM=$FIRST_HALF_ORIG0_CHECKSUM
+ SECOND_HALF_CHECKSUM=$SECOND_HALF_ORIG0_CHECKSUM
+ ;;
+ $ORIG2)
+ FIRST_HALF_CHECKSUM=$FIRST_HALF_ORIG2_CHECKSUM
+ SECOND_HALF_CHECKSUM=$SECOND_HALF_ORIG2_CHECKSUM
+ ;;
+ *)
+ log_fail "invalid file: $file"
+ ;;
+ esac
+ ;;
+ *)
+ log_fail "invalid clone: $clone"
+ ;;
+ esac
+}
+
+function overwrite_clone
+{
+ typeset -r overwrite=$1
+
+ case "$overwrite" in
+ "no")
+ ;;
+ "free")
+ log_must truncate_test -s 0 -f $CLONE
+ log_must truncate_test -s $RECORDSIZE -f $CLONE
+ FIRST_HALF_CHECKSUM=$ZEROS_CHECKSUM
+ SECOND_HALF_CHECKSUM=$ZEROS_CHECKSUM
+ ;;
+ "full")
+ log_must dd if=$ORIG1 of=$CLONE bs=$RECORDSIZE count=1 2>/dev/null
+ FIRST_HALF_CHECKSUM=$FIRST_HALF_ORIG1_CHECKSUM
+ SECOND_HALF_CHECKSUM=$SECOND_HALF_ORIG1_CHECKSUM
+ ;;
+ "first half")
+ log_must dd if=$ORIG1 of=$CLONE bs=$HALFRECORDSIZE skip=0 seek=0 \
+ count=1 conv=notrunc 2>/dev/null
+ FIRST_HALF_CHECKSUM=$FIRST_HALF_ORIG1_CHECKSUM
+ ;;
+ "second half")
+ log_must dd if=$ORIG1 of=$CLONE bs=$HALFRECORDSIZE skip=1 seek=1 \
+ count=1 conv=notrunc 2>/dev/null
+ SECOND_HALF_CHECKSUM=$SECOND_HALF_ORIG1_CHECKSUM
+ ;;
+ *)
+ log_fail "invalid overwrite: $overwrite"
+ ;;
+ esac
+}
+
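+# Compare the clone's current contents against the checksums recorded for
+# each half; returns non-zero on a mismatch so the caller can report the
+# failing combination.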
+function checksum_compare
+{
+ typeset -r compare=$1
+ typeset first_half_calculated_checksum second_half_calculated_checksum
+
+ case "$compare" in
+ "no")
+ ;;
+ "yes")
+ first_half_calculated_checksum=$(first_half_checksum $CLONE)
+ second_half_calculated_checksum=$(second_half_checksum $CLONE)
+
+ if [[ $first_half_calculated_checksum != $FIRST_HALF_CHECKSUM ]] || \
+ [[ $second_half_calculated_checksum != $SECOND_HALF_CHECKSUM ]]; then
+ return 1
+ fi
+ ;;
+ *)
+ log_fail "invalid compare: $compare"
+ ;;
+ esac
+}
+
+function bclone_corner_cases_test
+{
+ typeset cached existing
+ typeset first_clone first_overwrite
+ typeset read_after read_before
+ typeset second_clone second_overwrite
+ typeset -r srcdir=$1
+ typeset -r dstdir=$2
+ typeset limit=$3
+ typeset -i count=0
+
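+ # When called with srcdir="count", the loops below only count the
+ # test combinations. Otherwise, if a limit is given, draw 2*limit
+ # random case numbers (to absorb duplicates), keep at most $limit
+ # distinct ones, and run only those cases.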
+ if [[ $srcdir != "count" ]]; then
+ if [[ -n "$limit" ]]; then
+ typeset -r total_count=$(bclone_corner_cases_test count)
+ limit=$(random_int_between 1 $total_count $((limit*2)) | sort -nu | head -n $limit | xargs)
+ fi
+ bclone_corner_cases_init $srcdir $dstdir
+ fi
+
+ #
+ # (create) / (cache) / (clone) / (overwrite) / (read) / (clone) / (overwrite) / (read) / read next txg
+ #
+ for existing in "no" "small empty" "full empty" "small data" "full data"; do
+ for cached in "uncached" "cached"; do
+ for first_clone in "no" "yes"; do
+ for first_overwrite in "no" "free" "full" "first half" "second half"; do
+ for read_before in "no" "yes"; do
+ for second_clone in "no" "yes"; do
+ for second_overwrite in "no" "free" "full" "first half" "second half"; do
+ for read_after in "no" "yes"; do
+ if [[ $first_clone = "no" ]] && \
+ [[ $second_clone = "no" ]]; then
+ continue
+ fi
+ if [[ $first_clone = "no" ]] && \
+ [[ $read_before = "yes" ]]; then
+ continue
+ fi
+ if [[ $second_clone = "no" ]] && \
+ [[ $read_before = "yes" ]] && \
+ [[ $read_after = "yes" ]]; then
+ continue
+ fi
+
+ count=$((count+1))
+
+ if [[ $srcdir = "count" ]]; then
+ # Just counting.
+ continue
+ fi
+
+ if [[ -n "$limit" ]]; then
+ if ! echo " $limit " | grep -q " $count "; then
+ continue
+ fi
+ fi
+
+ FIRST_HALF_CHECKSUM=""
+ SECOND_HALF_CHECKSUM=""
+
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+
+ create_existing "$existing"
+
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+
+ cache_clone "$cached"
+
+ create_clone "$first_clone" "$ORIG0"
+
+ overwrite_clone "$first_overwrite"
+
+ if checksum_compare $read_before; then
+ log_note "existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before"
+ else
+ log_fail "FAIL: existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before"
+ fi
+
+ create_clone "$second_clone" "$ORIG2"
+
+ overwrite_clone "$second_overwrite"
+
+ if checksum_compare $read_after; then
+ log_note "existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before / second_clone: $second_clone / read_after: $read_after"
+ else
+ log_fail "FAIL: existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before / second_clone: $second_clone / read_after: $read_after"
+ fi
+
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+
+ if checksum_compare "yes"; then
+ log_note "existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before / second_clone: $second_clone / read_after: $read_after / read_next_txg"
+ else
+ log_fail "FAIL: existing: $existing / cached: $cached / first_clone: $first_clone / first_overwrite: $first_overwrite / read_before: $read_before / second_clone: $second_clone / read_after: $read_after / read_next_txg"
+ fi
+
+ rm -f "$CLONE"
+ done
+ done
+ done
+ done
+ done
+ done
+ done
+ done
+
+ if [[ $srcdir = "count" ]]; then
+ echo $count
+ fi
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases.ksh
similarity index 60%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases.ksh
index 58441bf8f3ad..35188cddb063 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases.ksh
@@ -1,41 +1,45 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_corner_cases.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
+verify_crossfs_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify various corner cases in block cloning across datasets"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set recordsize=$RECORDSIZE $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+log_must zfs set recordsize=$RECORDSIZE $TESTDSTFS
+
+bclone_corner_cases_test $TESTSRCDIR $TESTDSTDIR
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases_limited.ksh
similarity index 59%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases_limited.ksh
index 58441bf8f3ad..1fc1bbd07fd9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_corner_cases_limited.ksh
@@ -1,41 +1,45 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_corner_cases.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
+verify_crossfs_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify various corner cases in block cloning across datasets"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set recordsize=$RECORDSIZE $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+log_must zfs set recordsize=$RECORDSIZE $TESTDSTFS
+
+bclone_corner_cases_test $TESTSRCDIR $TESTDSTDIR 100
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_data.ksh
similarity index 58%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_data.ksh
index 58441bf8f3ad..e2fe25d451dd 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_data.ksh
@@ -1,41 +1,46 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
+verify_crossfs_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify block cloning properly clones regular files across datasets"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+
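+# File sizes cover tiny files plus common block-size boundaries (512, 4K,
+# 128K, 1M and 4M, each tested one byte below, at, and one byte above the
+# boundary) to exercise partial, exact and multi-record clones.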
+for filesize in 1 107 113 511 512 513 4095 4096 4097 131071 131072 131073 \
+ 1048575 1048576 1048577 4194303 4194304 4194305; do
+ bclone_test random $filesize false $TESTSRCDIR $TESTDSTDIR
+done
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_embedded.ksh
similarity index 51%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_embedded.ksh
index 3bdd7db649f9..6a6fe1d309a9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_embedded.ksh
@@ -1,67 +1,50 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
-#
-
-#
-# Copyright (c) 2016 by Delphix. All rights reserved.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-
-#
-# DESCRIPTION:
-# Executing 'zpool status' with correct options succeeds
-#
-# STRATEGY:
-# 1. Create an array of correctly formed 'zpool status' options
-# 2. Execute each element of the array.
-# 3. Verify use of each option is successful.
-#
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
verify_runnable "both"
-typeset testpool
-if is_global_zone; then
- testpool=$TESTPOOL
-else
- testpool=${TESTPOOL%%/*}
-fi
-
-set -A args "" "-x" "-v" "-x $testpool" "-v $testpool" "-xv $testpool" \
- "-vx $testpool"
-
-log_assert "Executing 'zpool status' with correct options succeeds"
-
-typeset -i i=0
+verify_block_cloning
+verify_crossfs_block_cloning
-while [[ $i -lt ${#args[*]} ]]; do
+log_assert "Verify block cloning properly clones small files (with embedded blocks) across datasets"
- log_must zpool status ${args[$i]}
+# Enable ZLE compression to pin down the maximum amount of data we can
+# store in an embedded BP.
+log_must zfs set compress=zle $TESTSRCFS
+log_must zfs set compress=zle $TESTDSTFS
- (( i = i + 1 ))
+# Test BP_IS_EMBEDDED().
+# Maximum embedded payload size is 112 bytes, but the buffer is extended to
+# 512 bytes first and then compressed. 107 random bytes followed by 405 zeros
+# gives exactly 112 bytes after compression with ZLE.
+for filesize in 1 2 4 8 16 32 64 96 107; do
+ bclone_test random $filesize true $TESTSRCDIR $TESTDSTDIR
done
-log_pass "'zpool status' with correct options succeeded"
+log_pass
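For context, a standalone way to confirm that a payload landed in an embedded BP is to ask zdb directly. This is a hedged sketch, not part of the patch; the file name is hypothetical and the EMBEDDED marker is an assumption about zdb's blkptr formatting:

    # Write 107 random bytes (the ZLE sweet spot above) and inspect the BP.
    log_must dd if=/dev/urandom of=$TESTSRCDIR/emb bs=107 count=1
    sync_pool $TESTPOOL
    zdb -vvvvv $TESTSRCFS -O emb | grep -q EMBEDDED && \
        log_note "payload was embedded in the BP"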
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_hole.ksh
similarity index 61%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_hole.ksh
index 58441bf8f3ad..d4c33d6da30f 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_crossfs_hole.ksh
@@ -1,41 +1,45 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
+verify_crossfs_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify block cloning properly clones sparse files (files with holes) across datasets"
+
+# Compression doesn't matter here.
+
+# Test BP_IS_HOLE().
+for filesize in 1 511 512 513 4095 4096 4097 131071 131072 131073 \
+ 1048575 1048576 1048577 4194303 4194304 4194305; do
+ bclone_test hole $filesize false $TESTSRCDIR $TESTDSTDIR
+done
log_pass
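A quick way to see BP_IS_HOLE() in action outside bclone_test: a file created with truncate(1) alone consists entirely of holes. Sketch only; the file name is hypothetical and the HOLE marker is an assumption about zdb's blkptr output:

    # An all-sparse file should show only hole BPs at L0.
    log_must truncate -s 1M $TESTSRCDIR/holes
    sync_pool $TESTPOOL
    zdb -vvvvv $TESTSRCFS -O holes | grep -c HOLE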
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_all.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_all.ksh
new file mode 100755
index 000000000000..a5e7282fe6a8
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_all.ksh
@@ -0,0 +1,86 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning across datasets with different properties"
+
+log_must zfs set checksum=off $TESTSRCFS
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set copies=1 $TESTSRCFS
+log_must zfs set recordsize=131072 $TESTSRCFS
+log_must zfs set checksum=fletcher2 $TESTDSTFS
+log_must zfs set compress=lz4 $TESTDSTFS
+log_must zfs set copies=3 $TESTDSTFS
+log_must zfs set recordsize=8192 $TESTDSTFS
+
+FILESIZE=$(random_int_between 2 32767)
+FILESIZE=$((FILESIZE * 64))
+bclone_test text $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+
+log_must zfs set checksum=sha256 $TESTSRCFS
+log_must zfs set compress=zstd $TESTSRCFS
+log_must zfs set copies=2 $TESTSRCFS
+log_must zfs set recordsize=262144 $TESTSRCFS
+log_must zfs set checksum=off $TESTDSTFS
+log_must zfs set compress=off $TESTDSTFS
+log_must zfs set copies=1 $TESTDSTFS
+log_must zfs set recordsize=131072 $TESTDSTFS
+
+FILESIZE=$(random_int_between 2 32767)
+FILESIZE=$((FILESIZE * 64))
+bclone_test text $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+
+log_must zfs set checksum=sha512 $TESTSRCFS
+log_must zfs set compress=gzip $TESTSRCFS
+log_must zfs set copies=2 $TESTSRCFS
+log_must zfs set recordsize=512 $TESTSRCFS
+log_must zfs set checksum=fletcher4 $TESTDSTFS
+log_must zfs set compress=lzjb $TESTDSTFS
+log_must zfs set copies=3 $TESTDSTFS
+log_must zfs set recordsize=16384 $TESTDSTFS
+
+FILESIZE=$(random_int_between 2 32767)
+FILESIZE=$((FILESIZE * 64))
+bclone_test text $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+
+log_must zfs inherit checksum $TESTSRCFS
+log_must zfs inherit compress $TESTSRCFS
+log_must zfs inherit copies $TESTSRCFS
+log_must zfs inherit recordsize $TESTSRCFS
+log_must zfs inherit checksum $TESTDSTFS
+log_must zfs inherit compress $TESTDSTFS
+log_must zfs inherit copies $TESTDSTFS
+log_must zfs inherit recordsize $TESTDSTFS
+
+log_pass
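No comment explains the size arithmetic in this test, but the sibling tests spell out the same reasoning: the randomized size is a multiple of 64 bytes in [2*64, 32767*64] = [128, 2097088], so it stays above the ~112-byte embedded-payload limit and exceeds every recordsize this test sets (up to 262144). The idiom in isolation:

    # Multiples of 64 bytes in [128, 2097088]: never embedded, always
    # spanning more than one record at the recordsizes used above.
    FILESIZE=$(random_int_between 2 32767)
    FILESIZE=$((FILESIZE * 64))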
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_checksum.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_checksum.ksh
new file mode 100755
index 000000000000..7e064a0dfd73
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_checksum.ksh
@@ -0,0 +1,62 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning across datasets with different checksum properties"
+
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+
+for srcprop in "${checksum_prop_vals[@]}"; do
+ for dstprop in "${checksum_prop_vals[@]}"; do
+ if [[ $srcprop == $dstprop ]]; then
+ continue
+ fi
+ log_must zfs set checksum=$srcprop $TESTSRCFS
+ log_must zfs set checksum=$dstprop $TESTDSTFS
+ # 15*8=120, which is greater than 113, so we are sure the data won't
+ # be embedded into BP.
+ # 32767*8=262136, which is larger than a single default recordsize of
+ # 131072.
+ FILESIZE=$(random_int_between 15 32767)
+ FILESIZE=$((FILESIZE * 8))
+ bclone_test random $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+ done
+done
+
+log_must zfs inherit checksum $TESTSRCFS
+log_must zfs inherit checksum $TESTDSTFS
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_compress.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_compress.ksh
new file mode 100755
index 000000000000..e1d6e5949218
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_compress.ksh
@@ -0,0 +1,59 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning across datasets with different compression properties"
+
+for srcprop in "${compress_prop_vals[@]}"; do
+ for dstprop in "${compress_prop_vals[@]}"; do
+ if [[ $srcprop == $dstprop ]]; then
+ continue
+ fi
+ log_must zfs set compress=$srcprop $TESTSRCFS
+ log_must zfs set compress=$dstprop $TESTDSTFS
+ # 15*8=120, which is greater than 113, so we are sure the data won't
+ # be embedded into BP.
+ # 32767*8=262136, which is larger than a single default recordsize of
+ # 131072.
+ FILESIZE=$(random_int_between 15 32767)
+ FILESIZE=$((FILESIZE * 8))
+ bclone_test text $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+ done
+done
+
+log_must zfs inherit compress $TESTSRCFS
+log_must zfs inherit compress $TESTDSTFS
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_copies.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_copies.ksh
new file mode 100755
index 000000000000..ac823e1ec394
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_copies.ksh
@@ -0,0 +1,59 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning across datasets with different copies properties"
+
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+
+for srcprop in "${copies_prop_vals[@]}"; do
+ for dstprop in "${copies_prop_vals[@]}"; do
+ log_must zfs set copies=$srcprop $TESTSRCFS
+ log_must zfs set copies=$dstprop $TESTDSTFS
+ # 15*8=120, which is greater than 113, so we are sure the data won't
+ # be embedded into BP.
+ # 32767*8=262136, which is larger than a single default recordsize of
+ # 131072.
+ FILESIZE=$(random_int_between 15 32767)
+ FILESIZE=$((FILESIZE * 8))
+ bclone_test random $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+ done
+done
+
+log_must zfs inherit copies $TESTSRCFS
+log_must zfs inherit copies $TESTDSTFS
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_recordsize.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_recordsize.ksh
new file mode 100755
index 000000000000..d833e6123106
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_diffprops_recordsize.ksh
@@ -0,0 +1,65 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning across datasets with different recordsize properties"
+
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set compress=off $TESTDSTFS
+
+# recsize_prop_vals[] array contains too many entries and the tests take too
+# long. Let's use only a subset of them.
+typeset -a bclone_recsize_prop_vals=('512' '4096' '131072' '1048576')
+
+for srcprop in "${bclone_recsize_prop_vals[@]}"; do
+ for dstprop in "${bclone_recsize_prop_vals[@]}"; do
+ if [[ $srcprop == $dstprop ]]; then
+ continue
+ fi
+ log_must zfs set recordsize=$srcprop $TESTSRCFS
+ log_must zfs set recordsize=$dstprop $TESTDSTFS
+ # 2*64=128, which is greater than 113, so we are sure the data won't
+ # be embedded into BP.
+ # 32767*64=2097088, which is larger than the largest recordsize (1MB).
+ FILESIZE=$(random_int_between 2 32767)
+ FILESIZE=$((FILESIZE * 64))
+ bclone_test random $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+ done
+done
+
+log_must zfs inherit recordsize $TESTSRCFS
+log_must zfs inherit recordsize $TESTDSTFS
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_prop_sync.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_prop_sync.ksh
new file mode 100755
index 000000000000..f8aa1c875c60
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_prop_sync.ksh
@@ -0,0 +1,66 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by Pawel Jakub Dawidek
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/math.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "both"
+
+verify_block_cloning
+verify_crossfs_block_cloning
+
+log_assert "Verify block cloning with all sync property settings"
+
+log_must zfs set compress=zle $TESTSRCFS
+log_must zfs set compress=zle $TESTDSTFS
+
+for prop in "${sync_prop_vals[@]}"; do
+ log_must zfs set sync=$prop $TESTSRCFS
+ # 32767*8=262136, which is larger than a single default recordsize of
+ # 131072.
+ FILESIZE=$(random_int_between 1 32767)
+ FILESIZE=$((FILESIZE * 8))
+ bclone_test random $FILESIZE false $TESTSRCDIR $TESTSRCDIR
+done
+
+for srcprop in "${sync_prop_vals[@]}"; do
+ log_must zfs set sync=$srcprop $TESTSRCFS
+ for dstprop in "${sync_prop_vals[@]}"; do
+ log_must zfs set sync=$dstprop $TESTDSTFS
+ # 32767*8=262136, which is larger than a single default recordsize of
+ # 131072.
+ FILESIZE=$(random_int_between 1 32767)
+ FILESIZE=$((FILESIZE * 8))
+ bclone_test random $FILESIZE false $TESTSRCDIR $TESTDSTDIR
+ done
+done
+
+log_must zfs inherit sync $TESTSRCFS
+log_must zfs inherit sync $TESTDSTFS
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases.ksh
similarity index 65%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases.ksh
index 58441bf8f3ad..4aa2914da299 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases.ksh
@@ -1,41 +1,42 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_corner_cases.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify various corner cases in block cloning within the same dataset"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set recordsize=$RECORDSIZE $TESTSRCFS
+
+bclone_corner_cases_test $TESTSRCDIR $TESTSRCDIR
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases_limited.ksh
similarity index 65%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases_limited.ksh
index 58441bf8f3ad..b4737700eb7d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_corner_cases_limited.ksh
@@ -1,41 +1,42 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_corner_cases.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify various corner cases in block cloning within the same dataset"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+log_must zfs set recordsize=$RECORDSIZE $TESTSRCFS
+
+bclone_corner_cases_test $TESTSRCDIR $TESTSRCDIR 100
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_data.ksh
similarity index 60%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_data.ksh
index 58441bf8f3ad..e964f7bbf641 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_data.ksh
@@ -1,41 +1,44 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify block cloning properly clones regular files within the same dataset"
+
+# Disable compression to make sure we won't use embedded blocks.
+log_must zfs set compress=off $TESTSRCFS
+
+for filesize in 1 107 113 511 512 513 4095 4096 4097 131071 131072 131073 \
+ 1048575 1048576 1048577 4194303 4194304 4194305; do
+ bclone_test random $filesize false $TESTSRCDIR $TESTSRCDIR
+done
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_embedded.ksh
similarity index 53%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_embedded.ksh
index 58441bf8f3ad..df393a878015 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_embedded.ksh
@@ -1,41 +1,48 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify block cloning properly clones small files (with embedded blocks) within the same dataset"
+
+# Enable ZLE compression to pin down the maximum amount of data we can
+# store in an embedded BP.
+log_must zfs set compress=zle $TESTSRCFS
+
+# Test BP_IS_EMBEDDED().
+# Maximum embedded payload size is 112 bytes, but the buffer is extended to
+# 512 bytes first and then compressed. 107 random bytes followed by 405 zeros
+# gives exactly 112 bytes after compression with ZLE.
+for filesize in 1 2 4 8 16 32 64 96 107; do
+ bclone_test random $filesize true $TESTSRCDIR $TESTSRCDIR
+done
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_hole.ksh
similarity index 62%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_hole.ksh
index 58441bf8f3ad..3c6e345e6e64 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/bclone_samefs_hole.ksh
@@ -1,41 +1,44 @@
-#!/bin/ksh -p
+#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
-if ! command -v clonefile > /dev/null ; then
- log_unsupported "clonefile program required to test block cloning"
-fi
+verify_runnable "both"
-verify_runnable "global"
+verify_block_cloning
-if tunable_exists BCLONE_ENABLED ; then
- log_must save_tunable BCLONE_ENABLED
- log_must set_tunable32 BCLONE_ENABLED 1
-fi
+log_assert "Verify block cloning properly clones sparse files (files with holes) within the same dataset"
+
+# Compression doesn't matter here.
+
+# Test BP_IS_HOLE().
+for filesize in 1 511 512 513 4095 4096 4097 131071 131072 131073 \
+ 1048575 1048576 1048577 4194303 4194304 4194305; do
+ bclone_test hole $filesize false $TESTSRCDIR $TESTSRCDIR
+done
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/cleanup.ksh
similarity index 81%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/cleanup.ksh
index 42fe70042d6a..0021ccb57ae0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/cleanup.ksh
@@ -1,34 +1,44 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/bclone/bclone.cfg
-default_cleanup
+log_must zfs destroy $TESTSRCFS
+log_must zfs destroy $TESTDSTFS
+
+default_cleanup_noexit
+
+if tunable_exists BCLONE_ENABLED ; then
+ log_must restore_tunable BCLONE_ENABLED
+fi
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/setup.ksh
similarity index 66%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/setup.ksh
index b756d4e76c83..9d26088c5a8a 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/bclone/setup.ksh
@@ -1,35 +1,50 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
-# Copyright (c) 2013 by Delphix. All rights reserved.
+# Copyright (c) 2023 by Pawel Jakub Dawidek
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/bclone/bclone.cfg
+
+if ! command -v clonefile > /dev/null ; then
+ log_unsupported "clonefile program required to test block cloning"
+fi
+
+if tunable_exists BCLONE_ENABLED ; then
+ log_must save_tunable BCLONE_ENABLED
+ log_must set_tunable32 BCLONE_ENABLED 1
+fi
DISK=${DISKS%% *}
-default_setup $DISK
+
+default_setup_noexit $DISK "true"
+log_must zpool set feature@block_cloning=enabled $TESTPOOL
+log_must zfs create $TESTSRCFS
+log_must zfs create $TESTDSTFS
+log_pass
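setup.ksh and cleanup.ksh above pair the tunable handling; assembled into one place, the full lifecycle looks like this (sketch using the libtest.shlib helpers the two scripts already call):

    if tunable_exists BCLONE_ENABLED ; then
        log_must save_tunable BCLONE_ENABLED      # remember the current value
        log_must set_tunable32 BCLONE_ENABLED 1   # force block cloning on
    fi
    # ... run the bclone tests ...
    if tunable_exists BCLONE_ENABLED ; then
        log_must restore_tunable BCLONE_ENABLED   # put the old value back
    fi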
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib
index 8e16366b4cd6..50f3a3d262c0 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning.kshlib
@@ -1,54 +1,58 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
function have_same_content
{
- typeset hash1=$(cat $1 | md5sum)
- typeset hash2=$(cat $2 | md5sum)
+ typeset hash1=$(md5digest $1)
+ typeset hash2=$(md5digest $2)
log_must [ "$hash1" = "$hash2" ]
}
#
# get_same_blocks dataset1 path/to/file1 dataset2 path/to/file2
#
# Returns a space-separated list of the indexes (starting at 0) of the L0
# blocks that are shared between both files (by first DVA and checksum).
# Assumes that the two files have the same content, use have_same_content to
# confirm that.
#
function get_same_blocks
{
+ KEY=$5
+ if [ ${#KEY} -gt 0 ]; then
+ KEY="--key=$KEY"
+ fi
typeset zdbout=${TMPDIR:-$TEST_BASE_DIR}/zdbout.$$
- zdb -vvvvv $1 -O $2 | \
+ zdb $KEY -vvvvv $1 -O $2 | \
awk '/ L0 / { print l++ " " $3 " " $7 }' > $zdbout.a
- zdb -vvvvv $3 -O $4 | \
+ zdb $KEY -vvvvv $3 -O $4 | \
awk '/ L0 / { print l++ " " $3 " " $7 }' > $zdbout.b
- echo $(sort $zdbout.a $zdbout.b | uniq -d | cut -f1 -d' ')
+ echo $(sort -n $zdbout.a $zdbout.b | uniq -d | cut -f1 -d' ')
}
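Two details of this change are worth calling out: the new optional fifth argument lets callers hand zdb a passphrase for encrypted datasets (it becomes --key=...), and sort -n replaces the lexicographic sort so block indexes of 10 and above pair up correctly in the uniq -d step. Typical usage, mirroring the callers later in this diff:

    # Compare two 4x128K files; all four L0 blocks should be shared.
    typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
    log_must [ "$blocks" = "0 1 2 3" ]
    # For encrypted datasets, pass the passphrase as the fifth argument:
    blocks=$(get_same_blocks $DS1 file $DS2 clone $PASSPHRASE)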
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_cached.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_cached.ksh
new file mode 100755
index 000000000000..b0ef8ec99533
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_cached.ksh
@@ -0,0 +1,86 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+#
+# DESCRIPTION:
+# When the destination file is mmapped and already cached, we need to
+# update the mmapped pages after a successful clone.
+#
+# STRATEGY:
+# 1. Create a pool.
+# 2. Create two test files with random content.
+# 3. mmap the files, read them and clone from one to the other using
+#    clone_mmap_cached.
+# 4. clone_mmap_cached also verifies that the content of the destination
+#    file was updated while reading it from mmapped memory.
+#
+
+verify_runnable "global"
+
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+VDIR=$TEST_BASE_DIR/disk-bclone
+VDEV="$VDIR/a"
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -rf $VDIR
+}
+
+log_onexit cleanup
+
+log_assert "Test for clone into mmaped and cached file"
+
+log_must rm -rf $VDIR
+log_must mkdir -p $VDIR
+log_must truncate -s 1G $VDEV
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $VDEV
+log_must zfs create $TESTPOOL/$TESTFS
+
+for opts in "--" "-i" "-o" "-io"
+do
+ log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/src bs=1M count=1
+ log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/dst bs=1M count=1
+
+ # Clear cache.
+ log_must zpool export $TESTPOOL
+ log_must zpool import -d $VDIR $TESTPOOL
+
+ log_must clone_mmap_cached $opts /$TESTPOOL/$TESTFS/src /$TESTPOOL/$TESTFS/dst
+
+ sync_pool $TESTPOOL
+ log_must sync
+
+ log_must have_same_content /$TESTPOOL/$TESTFS/src /$TESTPOOL/$TESTFS/dst
+ blocks=$(get_same_blocks $TESTPOOL/$TESTFS src $TESTPOOL/$TESTFS dst)
+ # FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
+ log_must [ "$blocks" = "$(seq -s " " 0 7 | sed 's/ $//')" ]
+done
+
+log_pass "Clone properly updates mmapped and cached pages"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_write.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_write.ksh
new file mode 100755
index 000000000000..6215b3178e7e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_clone_mmap_write.ksh
@@ -0,0 +1,79 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+#
+# DESCRIPTION:
+# A PANIC is triggered in dbuf_redirty() if we clone a file, mmap it
+# and write from the map into the file. PR#15656 fixes this scenario.
+# This scenario also causes data corruption on FreeBSD, which is fixed
+# by PR#15665.
+#
+# STRATEGY:
+# 1. Create a pool
+# 2. Create a test file
+# 3. Clone, mmap and write to the file using clone_mmap_write
+# 4. Synchronize cached writes
+# 5. Verify data is correctly written to the disk
+#
+
+verify_runnable "global"
+
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+VDIR=$TEST_BASE_DIR/disk-bclone
+VDEV="$VDIR/a"
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -rf $VDIR
+}
+
+log_onexit cleanup
+
+log_assert "Test for clone, mmap and write scenario"
+
+log_must rm -rf $VDIR
+log_must mkdir -p $VDIR
+log_must truncate -s 1G $VDEV
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $VDEV
+log_must zfs create $TESTPOOL/$TESTFS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file bs=1M count=512
+log_must clone_mmap_write /$TESTPOOL/$TESTFS/file /$TESTPOOL/$TESTFS/clone
+
+sync_pool $TESTPOOL
+log_must sync
+
+log_must have_same_content /$TESTPOOL/$TESTFS/file /$TESTPOOL/$TESTFS/clone
+blocks=$(get_same_blocks $TESTPOOL/$TESTFS file $TESTPOOL/$TESTFS clone)
+# FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
+log_must [ "$blocks" = "$(seq -s " " 1 4095 | sed 's/ $//')" ]
+
+log_pass "Clone, mmap and write does not cause data corruption or " \
+ "trigger panic"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
index 43ea47b0ef19..0599739abee6 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
@@ -1,60 +1,60 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi
claim="The copy_file_range syscall can clone whole files."
log_assert $claim
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
log_must sync_pool $TESTPOOL
log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
log_must [ "$blocks" = "0 1 2 3" ]
log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh
index 74e6b04903a3..ad83d30291ac 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh
@@ -1,65 +1,64 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "5.3") ]]; then
- log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3"
-fi
+verify_crossfs_block_cloning
claim="The copy_file_range syscall can clone across datasets."
log_assert $claim
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
log_must zfs create $TESTPOOL/$TESTFS1
log_must zfs create $TESTPOOL/$TESTFS2
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/file1 bs=128K count=4
log_must sync_pool $TESTPOOL
log_must \
clonefile -f /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2 0 0 524288
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2
typeset blocks=$(get_same_blocks \
$TESTPOOL/$TESTFS1 file1 $TESTPOOL/$TESTFS2 file2)
log_must [ "$blocks" = "0 1 2 3" ]
log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh
index 9a96eacd60af..475910be7478 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback.ksh
@@ -1,86 +1,86 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
# Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi
claim="copy_file_range will fall back to copy when cloning not possible."
log_assert $claim
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
log_must sync_pool $TESTPOOL
log_note "Copying entire file with copy_file_range"
log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
log_must [ "$blocks" = "0 1 2 3" ]
log_note "Copying within a block with copy_file_range"
log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 32768 32768 65536
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
log_must [ "$blocks" = "1 2 3" ]
log_note "Copying across a block with copy_file_range"
log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 327680 327680 131072
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
log_must [ "$blocks" = "1" ]
log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh
index e52b34ec8a51..00982f68db86 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_fallback_same_txg.ksh
@@ -1,68 +1,68 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
# Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi
claim="copy_file_range will fall back to copy when cloning on same txg"
log_assert $claim
typeset timeout=$(get_tunable TXG_TIMEOUT)
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
set_tunable64 TXG_TIMEOUT $timeout
}
log_onexit cleanup
log_must set_tunable64 TXG_TIMEOUT 5000
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
log_must sync_pool $TESTPOOL true
log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
typeset blocks=$(get_same_blocks $TESTPOOL file $TESTPOOL clone)
log_must [ "$blocks" = "" ]
log_pass $claim
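The mechanics behind the empty block list: with TXG_TIMEOUT raised to 5000 seconds and no sync between the dd and the clonefile call, the source BPs have not been written out yet, dmu_read_l0_bps() finds nothing to clone, and copy_file_range degrades to an ordinary copy, so no L0 blocks end up shared. The decisive ordering, condensed from the test above:

    log_must set_tunable64 TXG_TIMEOUT 5000        # nothing syncs on its own
    log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=128K count=4
    log_must clonefile -f /$TESTPOOL/file /$TESTPOOL/clone 0 0 524288  # same txg
    log_must [ "$(get_same_blocks $TESTPOOL file $TESTPOOL clone)" = "" ]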
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh
index a5da0a0bd359..38c46e4741cb 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_partial.ksh
@@ -1,68 +1,68 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi
claim="The copy_file_range syscall can clone parts of a file."
log_assert $claim
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
log_must sync_pool $TESTPOOL
log_must dd if=/$TESTPOOL/file1 of=/$TESTPOOL/file2 bs=128K count=4
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
log_must [ "$blocks" = "" ]
log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 131072 131072 262144
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
log_must [ "$blocks" = "1 2" ]
log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh
new file mode 100755
index 000000000000..702e23267f7e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh
@@ -0,0 +1,169 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Kay Pedersen <mail@mkwg.de>
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+verify_runnable "global"
+
+verify_crossfs_block_cloning
+
+claim="Block cloning across encrypted datasets."
+
+log_assert $claim
+
+DS1="$TESTPOOL/encrypted1"
+DS2="$TESTPOOL/encrypted2"
+DS1_NC="$TESTPOOL/notcrypted1"
+PASSPHRASE="top_secret"
+
+function prepare_enc
+{
+ log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+ log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt $DS1"
+ log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt $DS2"
+ log_must zfs create $DS1/child1
+ log_must zfs create $DS1/child2
+ log_must zfs create $DS1_NC
+
+ log_note "Create test file"
+	# We must wait until the src file's txg is written to disk; otherwise
+	# the clone falls back to a normal copy. See "dmu_read_l0_bps" in
+	# "zfs/module/zfs/dmu.c" and "zfs_clone_range" in
+	# "zfs/module/zfs/zfs_vnops.c".
+ log_must dd if=/dev/urandom of=/$DS1/file bs=128K count=4
+ log_must dd if=/dev/urandom of=/$DS1/child1/file bs=128K count=4
+ log_must dd if=/dev/urandom of=/$DS1_NC/file bs=128K count=4
+ log_must sync_pool $TESTPOOL
+}
+
+function cleanup_enc
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+function clone_and_check
+{
+ I_FILE="$1"
+ O_FILE=$2
+ I_DS=$3
+ O_DS=$4
+ SAME_BLOCKS=$5
+	# The CLONE option ("true") selects copy_file_range, which should
+	# clone; anything else selects dd, which always performs a plain copy.
+ CLONE=$6
+ SNAPSHOT=$7
+ if [ ${#SNAPSHOT} -gt 0 ]; then
+ I_FILE=".zfs/snapshot/$SNAPSHOT/$1"
+ fi
+	if [ "$CLONE" = "true" ]; then
+ log_must clonefile -f "/$I_DS/$I_FILE" "/$O_DS/$O_FILE" 0 0 524288
+ else
+ log_must dd if="/$I_DS/$I_FILE" of="/$O_DS/$O_FILE" bs=128K
+ fi
+ log_must sync_pool $TESTPOOL
+
+ log_must have_same_content "/$I_DS/$I_FILE" "/$O_DS/$O_FILE"
+
+ if [ ${#SNAPSHOT} -gt 0 ]; then
+ I_DS="$I_DS@$SNAPSHOT"
+ I_FILE="$1"
+ fi
+ typeset blocks=$(get_same_blocks \
+ $I_DS $I_FILE $O_DS $O_FILE $PASSPHRASE)
+ log_must [ "$blocks" = "$SAME_BLOCKS" ]
+}
+
+log_onexit cleanup_enc
+
+prepare_enc
+
+log_note "Cloning entire file with copy_file_range across different enc" \
+ "roots, should fallback"
+# we are expecting no same block map.
+clone_and_check "file" "clone" $DS1 $DS2 "" true
+log_note "check if the file is still readable and the same after" \
+ "unmount and key unload, shouldn't fail"
+typeset hash1=$(md5digest "/$DS1/file")
+log_must eval "zfs umount $DS1 && zfs unload-key $DS1"
+typeset hash2=$(md5digest "/$DS2/clone")
+log_must [ "$hash1" = "$hash2" ]
+
+cleanup_enc
+prepare_enc
+
+log_note "Cloning entire file with copy_file_range across different child datasets"
+# The clone should not work because each child dataset derives its own
+# master key; we expect an empty shared-block map.
+clone_and_check "file" "clone" $DS1 "$DS1/child1" "" true
+clone_and_check "file" "clone" "$DS1/child1" "$DS1/child2" "" true
+
+cleanup_enc
+prepare_enc
+
+log_note "Copying entire file with copy_file_range across same snapshot"
+log_must zfs snapshot -r $DS1@s1
+log_must sync_pool $TESTPOOL
+log_must rm -f "/$DS1/file"
+log_must sync_pool $TESTPOOL
+clone_and_check "file" "clone" "$DS1" "$DS1" "0 1 2 3" true "s1"
+
+cleanup_enc
+prepare_enc
+
+log_note "Copying entire file with copy_file_range across different snapshot"
+clone_and_check "file" "file" $DS1 $DS2 "" true
+log_must zfs snapshot -r $DS2@s1
+log_must sync_pool $TESTPOOL
+log_must rm -f "/$DS1/file" "/$DS2/file"
+log_must sync_pool $TESTPOOL
+clone_and_check "file" "clone" "$DS2" "$DS1" "" true "s1"
+typeset hash1=$(md5digest "/$DS1/.zfs/snapshot/s1/file")
+log_note "destroy the snapshot and check if the file is still readable and" \
+ "has the same content"
+log_must zfs destroy -r $DS2@s1
+log_must sync_pool $TESTPOOL
+typeset hash2=$(md5digest "/$DS1/file")
+log_must [ "$hash1" = "$hash2" ]
+
+cleanup_enc
+prepare_enc
+
+log_note "Copying with copy_file_range from non encrypted to encrypted"
+clone_and_check "file" "copy" $DS1_NC $DS1 "" true
+
+cleanup_enc
+prepare_enc
+
+log_note "Copying with copy_file_range from encrypted to non encrypted"
+clone_and_check "file" "copy" $DS1 $DS1_NC "" true
+
+log_must sync_pool $TESTPOOL
+
+log_pass $claim
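
Aside: the fallback behaviour exercised above can be spot-checked in isolation; a sketch using the same helpers from block_cloning.kshlib, where an empty shared-block map indicates copy_file_range silently fell back to an ordinary copy:

    # Attempt a clone across encryption roots; expect a silent fallback.
    log_must clonefile -f /$DS1/file /$DS2/clone 0 0 524288
    log_must sync_pool $TESTPOOL
    # No shared BRT blocks means the data was copied, not cloned.
    typeset blocks=$(get_same_blocks $DS1 file $DS2 clone $PASSPHRASE)
    log_must [ "$blocks" = "" ]
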
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh
index d21b6251134e..3d916ab92165 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_disabled_copyfilerange.ksh
@@ -1,60 +1,60 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
verify_runnable "global"
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi
claim="The copy_file_range syscall copies files when block cloning is disabled."
log_assert $claim
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create -o feature@block_cloning=disabled $TESTPOOL $DISKS
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
log_must sync_pool $TESTPOOL
log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
log_must sync_pool $TESTPOOL
log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
log_must [ "$blocks" = "" ]
log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh
new file mode 100755
index 000000000000..919f320dea3f
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_lwb_buffer_overflow.ksh
@@ -0,0 +1,90 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 by iXsystems, Inc. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+#
+# DESCRIPTION:
+# Test for LWB buffer overflow with a multiple-vdev ZIL, where a 128KB
+# block write is split into two 68KB ones and a maximum-size 128KB
+# TX_CLONE_RANGE record with 1022 block pointers must be written into
+# a 68KB buffer.
+#
+# STRATEGY:
+# 1. Create a pool with a multiple-vdev ZIL
+# 2. Write a maximum-size TX_CLONE_RANGE record with 1022 block
+#    pointers into a 68KB buffer
+# 3. Sync TXG
+# 4. Clone the file
+# 5. Synchronize cached writes
+#
+
+verify_runnable "global"
+
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+VDIR=$TEST_BASE_DIR/disk-bclone
+VDEV="$VDIR/a $VDIR/b $VDIR/c"
+LDEV="$VDIR/e $VDIR/f"
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -rf $VDIR
+}
+
+log_onexit cleanup
+
+log_assert "Test for LWB buffer overflow with multiple VDEVs ZIL"
+
+log_must rm -rf $VDIR
+log_must mkdir -p $VDIR
+log_must truncate -s $MINVDEVSIZE $VDEV $LDEV
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $VDEV \
+ log mirror $LDEV
+log_must zfs create -o recordsize=32K $TESTPOOL/$TESTFS
+# Each ZIL log entry can fit 130816 bytes of a block cloning operation,
+# so it can store 1022 block pointers. With LWB optimization enabled,
+# an assert was hit when a 128KB block write was split into two 68KB
+# ones across 2 SLOG devices.
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file1 bs=32K count=1022 \
+ conv=fsync
+sync_pool $TESTPOOL
+log_must clonefile -f /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/file2
+log_must sync
+
+sync_pool $TESTPOOL
+log_must have_same_content /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/file2
+typeset blocks=$(get_same_blocks $TESTPOOL/$TESTFS file1 $TESTPOOL/$TESTFS file2)
+# FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
+log_must [ "$blocks" = "$(seq -s " " 0 1021 | sed 's/ $//')" ]
+
+log_pass "LWB buffer overflow is not triggered with multiple VDEVs ZIL"
+
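Aside: the 1022 figure follows directly from the sizes quoted in the comment above; a sketch of the arithmetic (the 130816-byte payload and the 128-byte block pointer size are taken from that comment):

    typeset -i payload=130816   # usable TX_CLONE_RANGE payload per ZIL entry
    typeset -i bp=128           # sizeof (blkptr_t)
    echo $(( payload / bp ))    # 1022 block pointers
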
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay.ksh
new file mode 100755
index 000000000000..530152004686
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay.ksh
@@ -0,0 +1,132 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+#
+# DESCRIPTION:
+# Verify slogs are replayed correctly for cloned files. This
+# test is ported from slog_replay tests for block cloning.
+#
+# STRATEGY:
+# 1. Create an empty file system (TESTFS)
+# 2. Create regular files and sync
+# 3. Freeze TESTFS
+# 4. Clone the file
+# 5. Unmount filesystem
+# <At this stage TESTFS is frozen, the intent log contains a
+# complete set of deltas to replay it>
+# 6. Remount TESTFS <which replays the intent log>
+# 7. Compare clone file with the original file
+#
+
+verify_runnable "global"
+
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+export VDIR=$TEST_BASE_DIR/disk-bclone
+export VDEV="$VDIR/a $VDIR/b $VDIR/c"
+export LDEV="$VDIR/e $VDIR/f"
+log_must rm -rf $VDIR
+log_must mkdir -p $VDIR
+log_must truncate -s $MINVDEVSIZE $VDEV $LDEV
+
+claim="The slogs are replayed correctly for cloned files."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -rf $TESTDIR $VDIR $VDIR2
+}
+
+log_onexit cleanup
+
+#
+# 1. Create an empty file system (TESTFS)
+#
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $VDEV \
+ log mirror $LDEV
+log_must zfs create $TESTPOOL/$TESTFS
+
+#
+# 2. TX_WRITE: Create two files and sync txg
+#
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file1 \
+ oflag=sync bs=128k count=4
+log_must zfs set recordsize=16K $TESTPOOL/$TESTFS
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file2 \
+ oflag=sync bs=16K count=2048
+sync_pool $TESTPOOL
+
+#
+# 3. Checkpoint for ZIL Replay
+#
+log_must zpool freeze $TESTPOOL
+
+#
+# 4. TX_CLONE_RANGE: Clone the file
+#
+log_must clonefile -f /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/clone1
+log_must clonefile -f /$TESTPOOL/$TESTFS/file2 /$TESTPOOL/$TESTFS/clone2
+
+#
+# 5. Unmount filesystem and export the pool
+#
+# At this stage TESTFS is frozen, the intent log contains a complete set
+# of deltas to replay for clone files.
+#
+log_must zfs unmount /$TESTPOOL/$TESTFS
+
+log_note "Verify transactions to replay:"
+log_must zdb -iv $TESTPOOL/$TESTFS
+
+log_must zpool export $TESTPOOL
+
+#
+# 6. Remount TESTFS <which replays the intent log>
+#
+# Import the pool to unfreeze it and claim log blocks. It has to be
+# `zpool import -f` because we can't write a frozen pool's labels!
+#
+log_must zpool import -f -d $VDIR $TESTPOOL
+
+#
+# 7. Compare clone file with the original file
+#
+log_must have_same_content /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/clone1
+log_must have_same_content /$TESTPOOL/$TESTFS/file2 /$TESTPOOL/$TESTFS/clone2
+
+typeset blocks=$(get_same_blocks $TESTPOOL/$TESTFS file1 \
+ $TESTPOOL/$TESTFS clone1)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+typeset blocks=$(get_same_blocks $TESTPOOL/$TESTFS file2 \
+ $TESTPOOL/$TESTFS clone2)
+# FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
+log_must [ "$blocks" = "$(seq -s " " 0 2047 | sed 's/ $//')" ]
+
+log_pass $claim
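
Aside: when debugging a failure in step 6, the queued ZIL records can be inspected before the import replays them; a sketch (zdb -i dumps intent logs, -v adds detail; the grep pattern is illustrative):

    # List the log records awaiting replay; TX_CLONE_RANGE entries
    # should appear alongside the TX_WRITEs created in step 2.
    zdb -iv $TESTPOOL/$TESTFS | grep -i clone
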
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay_encrypted.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay_encrypted.ksh
new file mode 100755
index 000000000000..0967415b7b7b
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_replay_encrypted.ksh
@@ -0,0 +1,134 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+#
+# DESCRIPTION:
+# Verify slogs are replayed correctly for encrypted cloned files.
+# This test is ported from slog_replay tests for block cloning.
+#
+# STRATEGY:
+# 1. Create an encrypted file system (TESTFS)
+# 2. Create regular files and sync
+# 3. Freeze TESTFS
+# 4. Clone the file
+# 5. Unmount filesystem
+# <At this stage TESTFS is frozen, the intent log contains a
+# complete set of deltas to replay it>
+# 6. Remount encrypted TESTFS <which replays the intent log>
+# 7. Compare clone file with the original file
+#
+
+verify_runnable "global"
+
+if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+ log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+export VDIR=$TEST_BASE_DIR/disk-bclone
+export VDEV="$VDIR/a $VDIR/b $VDIR/c"
+export LDEV="$VDIR/e $VDIR/f"
+log_must rm -rf $VDIR
+log_must mkdir -p $VDIR
+log_must truncate -s $MINVDEVSIZE $VDEV $LDEV
+export PASSPHRASE="password"
+
+claim="The slogs are replayed correctly for encrypted cloned files."
+
+log_assert $claim
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -rf $TESTDIR $VDIR $VDIR2
+}
+
+log_onexit cleanup
+
+#
+# 1. Create an encrypted file system (TESTFS)
+#
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $VDEV \
+ log mirror $LDEV
+log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt $TESTPOOL/$TESTFS"
+
+#
+# 2. TX_WRITE: Create two files and sync txg
+#
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file1 \
+ oflag=sync bs=128k count=4
+log_must zfs set recordsize=16K $TESTPOOL/$TESTFS
+log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file2 \
+ oflag=sync bs=16K count=2048
+sync_pool $TESTPOOL
+
+#
+# 3. Checkpoint for ZIL Replay
+#
+log_must zpool freeze $TESTPOOL
+
+#
+# 4. TX_CLONE_RANGE: Clone the file
+#
+log_must clonefile -f /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/clone1
+log_must clonefile -f /$TESTPOOL/$TESTFS/file2 /$TESTPOOL/$TESTFS/clone2
+
+#
+# 5. Unmount filesystem and export the pool
+#
+# At this stage TESTFS is frozen, the intent log contains a complete set
+# of deltas to replay for clone files.
+#
+log_must zfs unmount /$TESTPOOL/$TESTFS
+
+log_note "Verify transactions to replay:"
+log_must zdb -iv $TESTPOOL/$TESTFS
+
+log_must zpool export $TESTPOOL
+
+#
+# 6. Remount TESTFS <which replays the intent log>
+#
+# Import the pool to unfreeze it and claim log blocks. It has to be
+# `zpool import -f` because we can't write a frozen pool's labels!
+#
+log_must eval "echo $PASSPHRASE | zpool import -l -f -d $VDIR $TESTPOOL"
+
+#
+# 7. Compare clone file with the original file
+#
+log_must have_same_content /$TESTPOOL/$TESTFS/file1 /$TESTPOOL/$TESTFS/clone1
+log_must have_same_content /$TESTPOOL/$TESTFS/file2 /$TESTPOOL/$TESTFS/clone2
+
+typeset blocks=$(get_same_blocks $TESTPOOL/$TESTFS file1 \
+ $TESTPOOL/$TESTFS clone1 $PASSPHRASE)
+log_must [ "$blocks" = "0 1 2 3" ]
+
+typeset blocks=$(get_same_blocks $TESTPOOL/$TESTFS file2 \
+ $TESTPOOL/$TESTFS clone2 $PASSPHRASE)
+# FreeBSD's seq(1) leaves a trailing space, remove it with sed(1).
+log_must [ "$blocks" = "$(seq -s " " 0 2047 | sed 's/ $//')" ]
+
+log_pass $claim
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
index 58441bf8f3ad..a9b13f062a4e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/setup.ksh
@@ -1,41 +1,44 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2023, Klara Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
if ! command -v clonefile > /dev/null ; then
log_unsupported "clonefile program required to test block cloning"
fi
+if ! command -v clone_mmap_cached > /dev/null ; then
+ log_unsupported "clone_mmap_cached program required to test block cloning"
+fi
verify_runnable "global"
if tunable_exists BCLONE_ENABLED ; then
log_must save_tunable BCLONE_ENABLED
log_must set_tunable32 BCLONE_ENABLED 1
fi
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh
index 74caa12a9cc4..945db71bf113 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cache/cache_012_pos.ksh
@@ -1,115 +1,107 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020, George Amanakis. All rights reserved.
#
. $STF_SUITE/tests/functional/cache/cache.cfg
. $STF_SUITE/tests/functional/cache/cache.kshlib
#
# DESCRIPTION:
# Looping around a cache device with l2arc_write_size exceeding
# the device size succeeds.
#
# STRATEGY:
# 1. Create pool with a cache device.
# 2. Set l2arc_write_max to a value larger than the cache device.
# 3. Create a file larger than the cache device and random read
# for 10 sec.
-# 4. Verify that l2arc_write_max is set back to the default.
-# 5. Set l2arc_write_max to a value less than the cache device size but
+# 4. Set l2arc_write_max to a value less than the cache device size but
# larger than the default (256MB).
-# 6. Record the l2_size.
-# 7. Random read for 1 sec.
-# 8. Record the l2_size again.
-# 9. If (6) <= (8) then we have not looped around yet.
-# 10. If (6) > (8) then we looped around. Break out of the loop and test.
-# 11. Destroy pool.
+# 5. Record the l2_size.
+# 6. Random read for 1 sec.
+# 7. Record the l2_size again.
+# 8. If (5) <= (7) we have not looped around yet; repeat steps 5-7.
+# 9. Destroy pool.
#
verify_runnable "global"
command -v fio > /dev/null || log_unsupported "fio missing"
log_assert "Looping around a cache device succeeds."
function cleanup
{
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
log_must set_tunable32 L2ARC_WRITE_MAX $write_max
log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch
}
log_onexit cleanup
typeset write_max=$(get_tunable L2ARC_WRITE_MAX)
typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH)
log_must set_tunable32 L2ARC_NOPREFETCH 0
typeset VDEV="$VDIR/vdev.disk"
typeset VDEV_SZ=$(( 4 * 1024 * 1024 * 1024 ))
typeset VCACHE="$VDIR/vdev.cache"
typeset VCACHE_SZ=$(( $VDEV_SZ / 2 ))
typeset fill_mb=$(( floor($VDEV_SZ * 3 / 4 ) ))
export DIRECTORY=/$TESTPOOL
export NUMJOBS=4
export RUNTIME=10
export PERF_RANDSEED=1234
export PERF_COMPPERCENT=66
export PERF_COMPCHUNK=0
export BLOCKSIZE=128K
export SYNC_TYPE=0
export DIRECT=1
export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))
log_must set_tunable32 L2ARC_WRITE_MAX $(( $VCACHE_SZ * 2 ))
log_must truncate -s $VCACHE_SZ $VCACHE
log_must truncate -s $VDEV_SZ $VDEV
log_must zpool create -f $TESTPOOL $VDEV cache $VCACHE
# This test relies on atime writes to force the L2ARC discards.
log_must zfs set relatime=off $TESTPOOL
log_must fio $FIO_SCRIPTS/mkfiles.fio
log_must fio $FIO_SCRIPTS/random_reads.fio
-typeset write_max2=$(get_tunable L2ARC_WRITE_MAX)
-
-log_must test $write_max2 -eq $write_max
-
log_must set_tunable32 L2ARC_WRITE_MAX $(( 256 * 1024 * 1024 ))
export RUNTIME=1
typeset do_once=true
while $do_once || [[ $l2_size1 -le $l2_size2 ]]; do
typeset l2_size1=$(get_arcstat l2_size)
log_must fio $FIO_SCRIPTS/random_reads.fio
typeset l2_size2=$(get_arcstat l2_size)
do_once=false
done
-log_must test $l2_size1 -gt $l2_size2
-
log_must zpool destroy $TESTPOOL
log_pass "Looping around a cache device succeeds."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_after_mount.ksh
similarity index 53%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_after_mount.ksh
index 43ea47b0ef19..0d4b66ea854c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_after_mount.ksh
@@ -1,60 +1,62 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2023, Klara Inc.
+# Copyright (c) 2023 by Proxmox. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
-verify_runnable "global"
-
-if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
- log_unsupported "copy_file_range not available before Linux 4.5"
-fi
-
-claim="The copy_file_range syscall can clone whole files."
+# DESCRIPTION:
+# Verify that nfs shares persist after zfs mount -a
+#
+# STRATEGY:
+# 1. Verify that the filesystem is not shared.
+# 2. Enable the 'sharenfs' property
+# 3. Verify filesystem is shared
+# 4. Invoke 'zfs mount -a'
+# 5. Verify filesystem is still shared
-log_assert $claim
+verify_runnable "global"
function cleanup
{
- datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ log_must zfs set sharenfs=off $TESTPOOL/$TESTFS
+ is_shared $TESTPOOL/$TESTFS && \
+ log_must unshare_fs $TESTPOOL/$TESTFS
+ log_must zfs share -a
}
-log_onexit cleanup
-
-log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
-log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=128K count=4
-log_must sync_pool $TESTPOOL
+log_onexit cleanup
-log_must clonefile -f /$TESTPOOL/file1 /$TESTPOOL/file2 0 0 524288
-log_must sync_pool $TESTPOOL
+cleanup
-log_must have_same_content /$TESTPOOL/file1 /$TESTPOOL/file2
+log_must zfs set sharenfs="on" $TESTPOOL/$TESTFS
+log_must is_shared $TESTPOOL/$TESTFS
+log_must is_exported $TESTPOOL/$TESTFS
-typeset blocks=$(get_same_blocks $TESTPOOL file1 $TESTPOOL file2)
-log_must [ "$blocks" = "0 1 2 3" ]
+log_must zfs mount -a
+log_must is_shared $TESTPOOL/$TESTFS
+log_must is_exported $TESTPOOL/$TESTFS
-log_pass $claim
+log_pass "Verify that nfs shares persist after zfs mount -a"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh
index 3bdd7db649f9..d6f32cdc7ac6 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh
@@ -1,67 +1,69 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Executing 'zpool status' with correct options succeeds
#
# STRATEGY:
# 1. Create an array of correctly formed 'zpool status' options
# 2. Execute each element of the array.
# 3. Verify use of each option is successful.
#
verify_runnable "both"
typeset testpool
if is_global_zone; then
testpool=$TESTPOOL
else
testpool=${TESTPOOL%%/*}
fi
set -A args "" "-x" "-v" "-x $testpool" "-v $testpool" "-xv $testpool" \
- "-vx $testpool"
+ "-vx $testpool" "-e $testpool" "-es $testpool"
log_assert "Executing 'zpool status' with correct options succeeds"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
log_must zpool status ${args[$i]}
(( i = i + 1 ))
done
+cleanup
+
log_pass "'zpool status' with correct options succeeded"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh
index b501aac5ad6d..52b22dd833f0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_003_pos.ksh
@@ -1,73 +1,75 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2019, Delphix. All rights reserved.
# Copyright (c) 2021, George Amanakis. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify correct output with 'zpool status -v' after corrupting a file
#
# STRATEGY:
# 1. Create a pool and a file
# 2. zinject checksum errors
# 3. Read the file
# 4. Take a snapshot and make a clone
# 5. Verify we see "snapshot, clone and filesystem" output in 'zpool status -v'
+# and 'zpool status -ev'
function cleanup
{
log_must zinject -c all
datasetexists $TESTPOOL2 && log_must zpool destroy $TESTPOOL2
rm -f $TESTDIR/vdev_a
}
verify_runnable "both"
log_assert "Verify correct 'zpool status -v' output with a corrupted file"
log_onexit cleanup
truncate -s $MINVDEVSIZE $TESTDIR/vdev_a
log_must zpool create -f $TESTPOOL2 $TESTDIR/vdev_a
log_must fio --rw=write --name=job --size=10M --filename=/$TESTPOOL2/10m_file
log_must zinject -t data -e checksum -f 100 -am /$TESTPOOL2/10m_file
# Try to read the 2nd megabyte of 10m_file
dd if=/$TESTPOOL2/10m_file bs=1M || true
log_must zfs snapshot $TESTPOOL2@snap
log_must zfs clone $TESTPOOL2@snap $TESTPOOL2/clone
log_must zfs create $TESTPOOL2/$TESTFS1
# Check that the snapshot, clone and filesystem report errors for our files
log_must zpool status -v $TESTPOOL2
log_must eval "zpool status -v | grep '$TESTPOOL2@snap:/10m_file'"
log_must eval "zpool status -v | grep '$TESTPOOL2/clone/10m_file'"
log_must eval "zpool status -v | grep '$TESTPOOL2/10m_file'"
+log_must eval "zpool status -ev | grep '$TESTPOOL2/10m_file'"
log_mustnot eval "zpool status -v | grep '$TESTFS1'"
log_pass "'zpool status -v' outputs affected filesystem, snapshot & clone"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_008_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_008_pos.ksh
new file mode 100755
index 000000000000..6be2ad5a7410
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_008_pos.ksh
@@ -0,0 +1,104 @@
+#!/bin/ksh -p
+
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Verify 'zpool status -e' only shows unhealthy devices.
+#
+# STRATEGY:
+# 1. Create zpool
+# 2. Force DEGRADE, FAULT, or inject slow IOs for vdevs
+# 3. Verify vdevs are reported correctly with -e and -s
+# 4. Verify parents are reported as DEGRADED
+# 5. Verify healthy children are not reported
+#
+
+function cleanup
+{
+ log_must set_tunable64 ZIO_SLOW_IO_MS $OLD_SLOW_IO
+ zinject -c all
+ poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2
+ log_must rm -f $all_vdevs
+}
+
+log_assert "Verify 'zpool status -e'"
+
+log_onexit cleanup
+
+all_vdevs=$(echo $TESTDIR/vdev{1..6})
+log_must mkdir -p $TESTDIR
+log_must truncate -s $MINVDEVSIZE $all_vdevs
+
+OLD_SLOW_IO=$(get_tunable ZIO_SLOW_IO_MS)
+
+for raid_type in "draid2:3d:6c:1s" "raidz2"; do
+
+ log_must zpool create -f $TESTPOOL2 $raid_type $all_vdevs
+
+ # Check DEGRADED vdevs are shown.
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev4 "ONLINE"
+ log_must zinject -d $TESTDIR/vdev4 -A degrade $TESTPOOL2
+ log_must eval "zpool status -e $TESTPOOL2 | grep $TESTDIR/vdev4 | grep DEGRADED"
+
+ # Check FAULTED vdevs are shown.
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev5 "ONLINE"
+ log_must zinject -d $TESTDIR/vdev5 -A fault $TESTPOOL2
+ log_must eval "zpool status -e $TESTPOOL2 | grep $TESTDIR/vdev5 | grep FAULTED"
+
+ # Check no ONLINE vdevs are shown
+ log_mustnot eval "zpool status -e $TESTPOOL2 | grep ONLINE"
+
+	# Check no ONLINE slow vdevs are shown. Then mark IOs greater than
+ # 10ms slow, delay IOs 20ms to vdev6, check slow IOs.
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev6 "ONLINE"
+ log_mustnot eval "zpool status -es $TESTPOOL2 | grep ONLINE"
+
+ log_must set_tunable64 ZIO_SLOW_IO_MS 10
+ log_must zinject -d $TESTDIR/vdev6 -D20:100 $TESTPOOL2
+ log_must mkfile 1048576 /$TESTPOOL2/testfile
+ sync_pool $TESTPOOL2
+ log_must set_tunable64 ZIO_SLOW_IO_MS $OLD_SLOW_IO
+
+ # Check vdev6 slow IOs are only shown when requested with -s.
+ log_mustnot eval "zpool status -e $TESTPOOL2 | grep $TESTDIR/vdev6 | grep ONLINE"
+ log_must eval "zpool status -es $TESTPOOL2 | grep $TESTDIR/vdev6 | grep ONLINE"
+
+ # Pool level and top-vdev level status must be DEGRADED.
+ log_must eval "zpool status -e $TESTPOOL2 | grep $TESTPOOL2 | grep DEGRADED"
+ log_must eval "zpool status -e $TESTPOOL2 | grep $raid_type | grep DEGRADED"
+
+ # Check that healthy vdevs[1-3] aren't shown with -e.
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev1 "ONLINE"
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev2 "ONLINE"
+ log_must check_vdev_state $TESTPOOL2 $TESTDIR/vdev3 "ONLINE"
+ log_mustnot eval "zpool status -es $TESTPOOL2 | grep $TESTDIR/vdev1 | grep ONLINE"
+ log_mustnot eval "zpool status -es $TESTPOOL2 | grep $TESTDIR/vdev2 | grep ONLINE"
+ log_mustnot eval "zpool status -es $TESTPOOL2 | grep $TESTDIR/vdev3 | grep ONLINE"
+
+ log_must zinject -c all
+ log_must zpool status -es $TESTPOOL2
+
+ zpool destroy $TESTPOOL2
+done
+
+log_pass "Verify zpool status -e shows only unhealthy vdevs"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/.gitignore b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/.gitignore
new file mode 100644
index 000000000000..d15225ac8429
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/.gitignore
@@ -0,0 +1 @@
+seekflood
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh
index 42fe70042d6a..c0bccab12210 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cleanup.ksh
@@ -1,34 +1,38 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
default_cleanup
+
+if tunable_exists BCLONE_ENABLED ; then
+ log_must restore_tunable BCLONE_ENABLED
+fi
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_files_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_files_002_pos.ksh
new file mode 100755
index 000000000000..60817449ab03
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_files_002_pos.ksh
@@ -0,0 +1,161 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib
+
+#
+# DESCRIPTION:
+# Verify all cp --reflink modes work with modified file.
+#
+# STRATEGY:
+# 1. Verify "cp --reflink=never|auto|always" behaves as expected.
+# Two different modes of operation are tested.
+#
+# a. zfs_bclone_wait_dirty=0: FICLONE and FICLONERANGE fail with EINVAL
+# when there are dirty blocks which cannot be immediately cloned.
+# This is the default behavior.
+#
+# b. zfs_bclone_wait_dirty=1: FICLONE and FICLONERANGE wait for
+# dirty blocks to be written to disk allowing the clone to succeed.
+#     The downside is that this may be slow, which, depending on the
+#     situation, may defeat the point of making a clone.
+#
+
+verify_runnable "global"
+verify_block_cloning
+
+if ! is_linux; then
+ log_unsupported "cp --reflink is a GNU coreutils option"
+fi
+
+function cleanup
+{
+ datasetexists $TESTPOOL/cp-reflink && \
+	destroy_dataset $TESTPOOL/cp-reflink -f
+ log_must set_tunable32 BCLONE_WAIT_DIRTY 0
+}
+
+function verify_copy
+{
+ src_cksum=$(sha256digest $1)
+ dst_cksum=$(sha256digest $2)
+
+ if [[ "$src_cksum" != "$dst_cksum" ]]; then
+ log_must ls -l $CP_TESTDIR
+ log_fail "checksum mismatch ($src_cksum != $dst_cksum)"
+ fi
+}
+
+log_assert "Verify all cp --reflink modes work with modified file"
+
+log_onexit cleanup
+
+SRC_FILE=src.data
+DST_FILE=dst.data
+SRC_SIZE=$((($RANDOM % 2047) + 1))
+
+# A smaller recordsize is used merely to speed up the test.
+RECORDSIZE=4096
+
+log_must zfs create -o recordsize=$RECORDSIZE $TESTPOOL/cp-reflink
+CP_TESTDIR=$(get_prop mountpoint $TESTPOOL/cp-reflink)
+
+log_must cd $CP_TESTDIR
+
+# Never wait on dirty blocks (zfs_bclone_wait_dirty=0)
+log_must set_tunable32 BCLONE_WAIT_DIRTY 0
+
+for mode in "never" "auto" "always"; do
+ log_note "Checking 'cp --reflink=$mode'"
+
+ # Create a new file and immediately copy it.
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$RECORDSIZE count=$SRC_SIZE
+
+ if [[ "$mode" == "always" ]]; then
+ log_mustnot cp --reflink=$mode $SRC_FILE $DST_FILE
+ log_must ls -l $CP_TESTDIR
+ else
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ fi
+ log_must rm -f $DST_FILE
+
+ # Append to an existing file and immediately copy it.
+ sync_pool $TESTPOOL
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$RECORDSIZE seek=$SRC_SIZE \
+ count=1 conv=notrunc
+ if [[ "$mode" == "always" ]]; then
+ log_mustnot cp --reflink=$mode $SRC_FILE $DST_FILE
+ log_must ls -l $CP_TESTDIR
+ else
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ fi
+ log_must rm -f $DST_FILE
+
+ # Overwrite a random range of an existing file and immediately copy it.
+ sync_pool $TESTPOOL
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$((RECORDSIZE / 2)) \
+ seek=$(($RANDOM % $SRC_SIZE)) count=$(($RANDOM % 16)) conv=notrunc
+ if [[ "$mode" == "always" ]]; then
+ log_mustnot cp --reflink=$mode $SRC_FILE $DST_FILE
+ log_must ls -l $CP_TESTDIR
+ else
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ fi
+ log_must rm -f $SRC_FILE $DST_FILE
+done
+
+# Wait on dirty blocks (zfs_bclone_wait_dirty=1)
+log_must set_tunable32 BCLONE_WAIT_DIRTY 1
+
+for mode in "never" "auto" "always"; do
+ log_note "Checking 'cp --reflink=$mode'"
+
+ # Create a new file and immediately copy it.
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$RECORDSIZE count=$SRC_SIZE
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ log_must rm -f $DST_FILE
+
+ # Append to an existing file and immediately copy it.
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$RECORDSIZE seek=$SRC_SIZE \
+ count=1 conv=notrunc
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ log_must rm -f $DST_FILE
+
+ # Overwrite a random range of an existing file and immediately copy it.
+ log_must dd if=/dev/urandom of=$SRC_FILE bs=$((RECORDSIZE / 2)) \
+ seek=$(($RANDOM % $SRC_SIZE)) count=$(($RANDOM % 16)) conv=notrunc
+ log_must cp --reflink=$mode $SRC_FILE $DST_FILE
+ verify_copy $SRC_FILE $DST_FILE
+ log_must rm -f $SRC_FILE $DST_FILE
+done
+
+log_pass
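
Aside: the two tunable modes can be demonstrated in a few lines; a sketch (timing dependent, since a txg sync can land before the copy runs, so the EINVAL in the first case is expected but not guaranteed):

    log_must set_tunable32 BCLONE_WAIT_DIRTY 0
    log_must dd if=/dev/urandom of=dirty.data bs=4k count=64
    log_mustnot cp --reflink=always dirty.data clone.data  # EINVAL while dirty
    log_must set_tunable32 BCLONE_WAIT_DIRTY 1
    log_must dd if=/dev/urandom of=dirty.data bs=4k count=64 conv=notrunc
    log_must cp --reflink=always dirty.data clone.data     # waits, then clones
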
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_stress.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_stress.ksh
new file mode 100755
index 000000000000..43bb8ab572d2
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/cp_stress.ksh
@@ -0,0 +1,73 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+#
+# Copyright (c) 2023 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+#
+# https://github.com/openzfs/zfs/issues/15526 identified a dirty dnode
+# SEEK_HOLE/SEEK_DATA bug. https://github.com/openzfs/zfs/pull/15571
+# fixed the bug, and was backported to 2.1.14 and 2.2.2.
+#
+# This test is to ensure that the bug, as understood, will not recur.
+#
+# STRATEGY:
+#
+# 1. Run the 'seekflood' binary to create files with timing
+#    characteristics that can trigger #15526.
+# 2. A single run does not always trigger the bug, so run it repeatedly.
+
+verify_runnable "global"
+
+function cleanup
+{
+	rm -rf /$TESTPOOL/cp_stress
+}
+
+log_assert "Run the 'seekflood' binary repeatedly to try to trigger #15526"
+
+log_onexit cleanup
+
+log_must mkdir /$TESTPOOL/cp_stress
+
+MYPWD="$PWD"
+cd /$TESTPOOL/cp_stress
+CPUS=$(get_num_cpus)
+
+if is_freebsd ; then
+ # 'seekflood' takes longer on FreeBSD and can timeout the test
+ RUNS=3
+else
+ RUNS=10
+fi
+
+for i in $(seq 1 $RUNS) ; do
+ # Each run takes around 12 seconds.
+ log_must $STF_SUITE/tests/functional/cp_files/seekflood 2000 $CPUS
+done
+cd "$MYPWD"
+
+log_pass "No corruption detected"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/seekflood.c b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/seekflood.c
new file mode 100644
index 000000000000..02c2c8e6eca5
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/seekflood.c
@@ -0,0 +1,180 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2023, Rob Norris <robn@despairlabs.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#define DATASIZE (4096)
+char data[DATASIZE];
+
+static int
+_open_file(int n, int wr)
+{
+ char buf[256];
+ int fd;
+
+ snprintf(buf, sizeof (buf), "testdata_%d_%d", getpid(), n);
+
+ if ((fd = open(buf, wr ? (O_WRONLY | O_CREAT) : O_RDONLY,
+ wr ? (S_IRUSR | S_IWUSR) : 0)) < 0) {
+ fprintf(stderr, "Error: open '%s' (%s): %s\n",
+ buf, wr ? "write" : "read", strerror(errno));
+ exit(1);
+ }
+
+ return (fd);
+}
+
+static void
+_write_file(int n, int fd)
+{
+ /* write a big ball of stuff */
+ ssize_t nwr = write(fd, data, DATASIZE);
+ if (nwr < 0) {
+ fprintf(stderr, "Error: write '%d_%d': %s\n",
+ getpid(), n, strerror(errno));
+ exit(1);
+ } else if (nwr < DATASIZE) {
+ fprintf(stderr, "Error: write '%d_%d': short write\n", getpid(),
+ n);
+ exit(1);
+ }
+}
+
+static int
+_seek_file(int n, int fd)
+{
+ struct stat st;
+ if (fstat(fd, &st) < 0) {
+ fprintf(stderr, "Error: fstat '%d_%d': %s\n", getpid(), n,
+ strerror(errno));
+ exit(1);
+ }
+
+ /*
+ * A zero-sized file correctly has no data, so seeking the file is
+ * pointless.
+ */
+ if (st.st_size == 0)
+ return (0);
+
+ /* size is real, and we only write, so SEEK_DATA must find something */
+ if (lseek(fd, 0, SEEK_DATA) < 0) {
+ if (errno == ENXIO)
+ return (1);
+ fprintf(stderr, "Error: lseek '%d_%d': %s\n",
+ getpid(), n, strerror(errno));
+ exit(2);
+ }
+
+ return (0);
+}
+
+int
+main(int argc, char **argv)
+{
+ int nfiles = 0;
+ int nthreads = 0;
+
+ if (argc < 3 || (nfiles = atoi(argv[1])) == 0 ||
+ (nthreads = atoi(argv[2])) == 0) {
+ printf("usage: seekflood <nfiles> <threads>\n");
+ exit(1);
+ }
+
+ memset(data, 0x5a, DATASIZE);
+
+	/* fork off some flood workers (forked child processes) */
+ for (int i = 0; i < nthreads; i++) {
+ if (!fork()) {
+			/* worker (child process) main */
+
+ /* create zero file */
+ int fd = _open_file(0, 1);
+ _write_file(0, fd);
+ close(fd);
+
+ int count = 0;
+
+ int h = 0, i, j, rfd, wfd;
+ for (i = 0; i < nfiles; i += 2, h++) {
+ j = i+1;
+
+ /* seek h, write i */
+ rfd = _open_file(h, 0);
+ wfd = _open_file(i, 1);
+ count += _seek_file(h, rfd);
+ _write_file(i, wfd);
+ close(rfd);
+ close(wfd);
+
+ /* seek i, write j */
+ rfd = _open_file(i, 0);
+ wfd = _open_file(j, 1);
+ count += _seek_file(i, rfd);
+ _write_file(j, wfd);
+ close(rfd);
+ close(wfd);
+ }
+
+ /* return count of failed seeks to parent */
+ exit(count < 256 ? count : 255);
+ }
+ }
+
+	/* wait for the children; collect seek-failure counts from exit codes */
+ int count = 0, crashed = 0;
+ for (int i = 0; i < nthreads; i++) {
+ int wstatus;
+ wait(&wstatus);
+ if (WIFEXITED(wstatus))
+ count += WEXITSTATUS(wstatus);
+ else
+ crashed++;
+ }
+
+ if (crashed) {
+ fprintf(stderr, "Error: child crashed; test failed\n");
+ exit(1);
+ }
+
+ if (count) {
+ fprintf(stderr, "Error: %d seek failures; test failed\n",
+ count);
+ exit(1);
+ }
+
+ exit(0);
+}
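
Aside: the reproducer can also be built and run by hand; a sketch (the paths and the nproc call are illustrative):

    cc -o seekflood seekflood.c
    cd /tank/some-zfs-dir            # any directory on a ZFS dataset
    /path/to/seekflood 2000 $(nproc)
    echo $?                          # non-zero: SEEK_DATA misses or a crash
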
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
index b756d4e76c83..4223386b3615 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cp_files/setup.ksh
@@ -1,35 +1,41 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
DISK=${DISKS%% *}
+
+if tunable_exists BCLONE_ENABLED ; then
+ log_must save_tunable BCLONE_ENABLED
+ log_must set_tunable32 BCLONE_ENABLED 1
+fi
+
default_setup $DISK
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/io/io_uring.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/io/io_uring.ksh
index 47e439d0f4d5..2fa146556358 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/io/io_uring.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/io/io_uring.ksh
@@ -1,72 +1,79 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/io/io.cfg
#
# DESCRIPTION:
# Verify Linux io_uring.
#
# STRATEGY:
# 1. Use fio(1) in verify mode to perform write, read,
# random read, and random write workloads.
# 2. Repeat the test with additional fio(1) options.
#
verify_runnable "global"
if ! grep -q "CONFIG_IO_URING=y" /boot/config-$(uname -r); then
log_unsupported "Requires io_uring support"
fi
+if [ -e /etc/os-release ] ; then
+ source /etc/os-release
+ if [ -n "$REDHAT_SUPPORT_PRODUCT_VERSION" ] && ((floor($REDHAT_SUPPORT_PRODUCT_VERSION) == 9)) ; then
+ log_unsupported "Disabled on CentOS 9, fails with 'Operation not permitted'"
+ fi
+fi
+
fio --ioengine=io_uring --parse-only || log_unsupported "fio io_uring support required"
function cleanup
{
log_must rm -f "$mntpnt/rw*"
}
log_assert "Verify Linux io_uring"
log_onexit cleanup
ioengine="--ioengine=io_uring"
mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
dir="--directory=$mntpnt"
set -A fio_arg -- "--sync=0" "--sync=1" "--direct=0" "--direct=1"
for arg in "${fio_arg[@]}"; do
log_must fio $dir $ioengine $arg $FIO_WRITE_ARGS
log_must fio $dir $ioengine $arg $FIO_READ_ARGS
log_must fio $dir $ioengine $arg $FIO_RANDWRITE_ARGS
log_must fio $dir $ioengine $arg $FIO_RANDREAD_ARGS
log_must rm -f "$mntpnt/rw*"
done
log_pass "Verified Linux io_uring"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
index 30818050a07a..297c6a073bb9 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
@@ -1,372 +1,350 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/redundancy/redundancy.cfg
function cleanup
{
if poolexists $TESTPOOL; then
destroy_pool $TESTPOOL
fi
typeset dir
for dir in $TESTDIR $BASEDIR; do
if [[ -d $dir ]]; then
log_must rm -rf $dir
fi
done
}
-#
-# Get random number between min and max number.
-#
-# $1 Minimal value
-# $2 Maximal value
-#
-function random
-{
- typeset -i min=$1
- typeset -i max=$2
- typeset -i value
-
- while true; do
- ((value = RANDOM % (max + 1)))
- if ((value >= min)); then
- break
- fi
- done
-
- echo $value
-}
-
#
# Get the number of checksum errors for the pool.
#
# $1 Pool
#
function cksum_pool
{
typeset -i cksum=$(zpool status $1 | awk '
!NF { isvdev = 0 }
isvdev { errors += $NF }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
echo $cksum
}
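The awk script keys off the column header: once a line ending in CKSUM is seen, each following non-blank row is treated as a vdev line and its last field is summed; a blank line resets the flag. Illustrative input and result (abridged zpool status output):

    #   NAME        STATE     READ WRITE CKSUM
    #   testpool    ONLINE       0     0     0
    #     vdev0     ONLINE       0     0     3
    errs=$(cksum_pool testpool)   # would yield 3 for the rows above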
#
# Record the directories construction and checksum all the files which reside
# within the specified pool
#
# $1 The specified pool
# $2 The file in which to save the record.
#
function record_data
{
typeset pool=$1
typeset recordfile=$2
[[ -z $pool ]] && log_fail "No specified pool."
[[ -f $recordfile ]] && log_must rm -f $recordfile
sync_pool $pool
typeset mntpnt
mntpnt=$(get_prop mountpoint $pool)
log_must eval "du -a $mntpnt > $recordfile 2>&1"
#
# If the data is damaged, cksum fails and returns 1,
# so we do not use log_must here.
#
find $mntpnt -type f -exec cksum {} + >> $recordfile 2>&1
}
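A typical pairing records a baseline before the pool is damaged and a second snapshot after repair, then compares the two; is_data_valid below automates exactly this:

    record_data $TESTPOOL $PRE_RECORD_FILE
    # ... damage and repair the pool ...
    record_data $TESTPOOL $PST_RECORD_FILE
    cmp $PRE_RECORD_FILE $PST_RECORD_FILE && echo "data intact"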
#
# Create test pool and fill with files and directories.
#
# $1 pool name
# $2 pool type
# $3 virtual devices number
#
function setup_test_env
{
typeset pool=$1
typeset keyword=$2
typeset -i vdev_cnt=$3
typeset vdevs
typeset -i i=0
while (( i < vdev_cnt )); do
vdevs="$vdevs $BASEDIR/vdev$i"
((i += 1))
done
if [[ ! -d $BASEDIR ]]; then
log_must mkdir $BASEDIR
fi
if poolexists $pool ; then
destroy_pool $pool
fi
log_must truncate -s $MINVDEVSIZE $vdevs
log_must zpool create -O compression=off -f -m $TESTDIR $pool $keyword $vdevs
log_note "Filling up the filesystem ..."
typeset -i i=0
typeset file=$TESTDIR/file
typeset -i limit
(( limit = $(get_prop available $pool) / 2 ))
while true ; do
[[ $(get_prop available $pool) -lt $limit ]] && break
file_write -o create -f $file.$i -b $BLOCKSZ -c $NUM_WRITES || break
(( i = i + 1 ))
done
record_data $TESTPOOL $PRE_RECORD_FILE
}
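For example (the pool type and vdev count here are illustrative, not fixed by this library):

    setup_test_env $TESTPOOL raidz2 5   # five file-backed vdevs in a raidz2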
function refill_test_env
{
log_note "Re-filling the filesystem ..."
typeset pool=$1
typeset -i i=0
typeset mntpnt
mntpnt=$(get_prop mountpoint $pool)
typeset file=$mntpnt/file
while [[ -e $file.$i ]]; do
log_must rm -f $file.$i
file_write -o create -f $file.$i -b $BLOCKSZ -c $NUM_WRITES || break
(( i = i + 1 ))
done
record_data $TESTPOOL $PRE_RECORD_FILE
}
#
# Check pool status is healthy
#
# $1 pool
#
function is_healthy
{
typeset pool=$1
typeset healthy_output="pool '$pool' is healthy"
typeset real_output=$(zpool status -x $pool)
if [[ "$real_output" == "$healthy_output" ]]; then
return 0
else
typeset -i ret
zpool status -x $pool | grep "state:" | grep -q "FAULTED" && return 1
typeset l_scan
typeset errnum _
l_scan=$(zpool status -x $pool | grep "scan:")
l_scan=${l_scan##*"with"}
read -r errnum _ <<<"$l_scan"
return $errnum
fi
}
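The tail of is_healthy strips everything up to the last "with" on the scan line and reads the next token as the error count; a worked example with a typical scrub line:

    l_scan="  scan: scrub repaired 0B in 00:00:01 with 0 errors on Mon Jan  1 00:00:00 2024"
    l_scan=${l_scan##*"with"}        # -> " 0 errors on Mon ..."
    read -r errnum _ <<<"$l_scan"
    echo $errnum                     # -> 0, i.e. a healthy return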
#
# Check pool data is valid
#
# $1 pool
#
function is_data_valid
{
typeset pool=$1
log_must zpool scrub -w $pool
record_data $pool $PST_RECORD_FILE
if ! cmp $PRE_RECORD_FILE $PST_RECORD_FILE > /dev/null; then
log_must cat $PRE_RECORD_FILE
log_must cat $PST_RECORD_FILE
diff -u $PRE_RECORD_FILE $PST_RECORD_FILE
return 1
fi
return 0
}
#
# Get the names of the specified number of devices
#
# $1 pool name
# $2 devices count
#
function get_vdevs #pool cnt
{
typeset pool=$1
typeset -i cnt=$2
typeset all_devs=$(zpool iostat -v $pool | awk '{print $1}' | \
grep -vEe "^pool$|^capacity$|^mirror\-[0-9]$|^raidz[1-3]\-[0-9]$|^draid[1-3].*\-[0-9]$|---" \
-e "/old$|^$pool$")
typeset -i i=0
typeset vdevs
while ((i < cnt)); do
typeset dev _
read -r dev _ <<<"$all_devs"
eval all_devs=\${all_devs##*$dev}
vdevs="$dev $vdevs"
((i += 1))
done
echo "$vdevs"
}
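The grep filters the header, pool, and raidz/draid grouping rows out of zpool iostat -v, leaving bare vdev names; the loop then pops the first $cnt of them. For instance, with file-backed vdevs under $BASEDIR:

    vdevs=$(get_vdevs $TESTPOOL 2)   # e.g. "$BASEDIR/vdev1 $BASEDIR/vdev0"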
#
# Create and replace the same name virtual device files
#
# $1 pool name
# $2-n virtual device files
#
function replace_missing_devs
{
typeset pool=$1
shift
typeset vdev
for vdev in $@; do
log_must dd if=/dev/zero of=$vdev \
bs=1024k count=$((MINVDEVSIZE / (1024 * 1024))) \
conv=fdatasync
log_must zpool replace -wf $pool $vdev $vdev
done
}
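Worked sizing for the dd above, assuming MINVDEVSIZE is 256 MiB (an assumption for illustration, not a value this file sets):

    echo $(( 268435456 / (1024 * 1024) ))   # -> 256 blocks of bs=1024k, i.e. the full vdev size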
#
# Damage the pool's virtual device files.
#
# $1 pool name
# $2 Failing devices count
# $3 Damage method; if not null, the vdev labels are preserved
#
function damage_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset label="$3"
typeset vdevs
typeset -i bs_count=$(((MINVDEVSIZE / 1024) - 4096))
vdevs=$(get_vdevs $pool $cnt)
typeset dev
if [[ -n $label ]]; then
for dev in $vdevs; do
log_must dd if=/dev/zero of=$dev seek=512 bs=1024 \
count=$bs_count conv=notrunc >/dev/null 2>&1
done
else
for dev in $vdevs; do
log_must dd if=/dev/zero of=$dev bs=1024 \
count=$bs_count conv=notrunc >/dev/null 2>&1
done
fi
sync_pool $pool
}
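Worked sizing for the label-preserving branch, again assuming a 256 MiB MINVDEVSIZE: bs_count is 262144 - 4096 = 258048 KiB of zeros, and seek=512 starts the write 512 KiB in, so the two front vdev labels survive and about 3.5 MiB at the tail (covering the two rear labels) is never touched:

    echo $(( (268435456 / 1024) - 4096 ))   # -> 258048 1 KiB blocks written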
#
# Clear errors in the pool caused by data corruptions
#
# $1 pool name
#
function clear_errors
{
typeset pool=$1
log_must zpool clear $pool
if ! is_healthy $pool ; then
log_note "$pool should be healthy."
return 1
fi
if ! is_data_valid $pool ; then
log_note "Data should be valid in $pool."
return 1
fi
return 0
}
#
# Remove the specified pool's virtual device files
#
# $1 Pool name
# $2 Missing devices count
#
function remove_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset vdevs
vdevs=$(get_vdevs $pool $cnt)
log_must rm -f $vdevs
sync_pool $pool
}
#
# Recover the bad or missing device files in the pool
#
# $1 Pool name
# $2 Missing devices count
#
function recover_bad_missing_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset vdevs
vdevs=$(get_vdevs $pool $cnt)
replace_missing_devs $pool $vdevs
if ! is_healthy $pool ; then
log_note "$pool should be healthy."
return 1
fi
if ! is_data_valid $pool ; then
log_note "Data should be valid in $pool."
return 1
fi
return 0
}
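A typical consumer of this library strings the helpers together like so (pool type and counts are illustrative):

    setup_test_env $TESTPOOL raidz 3       # create and fill a 3-vdev raidz
    damage_devs $TESTPOOL 1 "keep_label"   # corrupt one vdev, labels intact
    log_must is_data_valid $TESTPOOL       # scrub, then compare records
    log_must clear_errors $TESTPOOL        # clear errors and re-verify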
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index 8fc8a9541740..fe8580263185 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,1149 +1,1191 @@
/*
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* add_disk() returns int */
/* #undef HAVE_ADD_DISK_RET */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
/* Define if you have [rt] */
#define HAVE_AIO_H 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdevname() is available */
/* #undef HAVE_BDEVNAME */
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_63 */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_OLD */
/* bdev_kobj() exists */
/* #undef HAVE_BDEV_KOBJ */
/* bdev_max_discard_sectors() is available */
/* #undef HAVE_BDEV_MAX_DISCARD_SECTORS */
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
+/* bdev_open_by_path() exists */
+/* #undef HAVE_BDEV_OPEN_BY_PATH */
+
+/* bdev_release() exists */
+/* #undef HAVE_BDEV_RELEASE */
+
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio_alloc() takes 4 arguments */
/* #undef HAVE_BIO_ALLOC_4ARG */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() exists and takes 4 args */
/* #undef HAVE_BLKDEV_GET_BY_PATH_4ARG */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD */
+/* __blkdev_issue_discard() is available */
+/* #undef HAVE_BLKDEV_ISSUE_DISCARD_ASYNC */
+
/* blkdev_issue_secure_erase() is available */
/* #undef HAVE_BLKDEV_ISSUE_SECURE_ERASE */
/* blkdev_put() accepts void* as arg 2 */
/* #undef HAVE_BLKDEV_PUT_HOLDER */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk_cleanup_disk() exists */
/* #undef HAVE_BLK_CLEANUP_DISK */
/* blk_mode_t is defined */
/* #undef HAVE_BLK_MODE_T */
/* block multiqueue is available */
/* #undef HAVE_BLK_MQ */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_discard() is available */
/* #undef HAVE_BLK_QUEUE_DISCARD */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* BLK_STS_RESV_CONFLICT is defined */
/* #undef HAVE_BLK_STS_RESV_CONFLICT */
/* Define if release() in block_device_operations takes 1 arg */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_splice_read exists */
/* #undef HAVE_COPY_SPLICE_READ */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* cpu_has_feature() is GPL-only */
/* #undef HAVE_CPU_HAS_FEATURE_GPL_ONLY */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* dentry aliases are in d_u member */
/* #undef HAVE_DENTRY_D_U_ALIASES */
/* dequeue_signal() takes 4 arguments */
/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_check_media_change() exists */
/* #undef HAVE_DISK_CHECK_MEDIA_CHANGE */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* FALLOC_FL_ZERO_RANGE is defined */
/* #undef HAVE_FALLOC_FL_ZERO_RANGE */
/* fault_in_iov_iter_readable() is available */
/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
/* filemap_range_has_page() is available */
/* #undef HAVE_FILEMAP_RANGE_HAS_PAGE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* fops->fadvise() exists */
/* #undef HAVE_FILE_FADVISE */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* flush_dcache_page() is GPL-only */
/* #undef HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* Define if compiler supports -Wformat-overflow */
/* #undef HAVE_FORMAT_OVERFLOW */
/* fsync_bdev() is declared in include/blkdev.h */
/* #undef HAVE_FSYNC_BDEV */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* yes */
/* #undef HAVE_GENERIC_FADVISE */
/* generic_fillattr requires struct mnt_idmap* */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP */
/* generic_fillattr requires struct mnt_idmap* and u32 request_mask */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* has iops->get_inode_acl() */
/* #undef HAVE_GET_INODE_ACL */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* iattr->ia_vfsuid and iattr->ia_vfsgid exist */
/* #undef HAVE_IATTR_VFSID */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* iops->getattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_GETATTR */
/* iops->setattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_SETATTR */
/* APIs for idmapped mount are present */
/* #undef HAVE_IDMAP_MNT_API */
+/* mnt_idmap does not have user_namespace */
+/* #undef HAVE_IDMAP_NO_USERNS */
+
/* Define if compiler supports -Wimplicit-fallthrough */
/* #undef HAVE_IMPLICIT_FALLTHROUGH */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_INFINITE_RECURSION */
+/* inode_get_atime() exists in linux/fs.h */
+/* #undef HAVE_INODE_GET_ATIME */
+
/* inode_get_ctime() exists in linux/fs.h */
/* #undef HAVE_INODE_GET_CTIME */
+/* inode_get_mtime() exists in linux/fs.h */
+/* #undef HAVE_INODE_GET_MTIME */
+
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes mnt_idmap */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAP */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_USERNS */
+/* inode_set_atime_to_ts() exists in linux/fs.h */
+/* #undef HAVE_INODE_SET_ATIME_TO_TS */
+
/* inode_set_ctime_to_ts() exists in linux/fs.h */
/* #undef HAVE_INODE_SET_CTIME_TO_TS */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
+/* inode_set_mtime_to_ts() exists in linux/fs.h */
+/* #undef HAVE_INODE_SET_MTIME_TO_TS */
+
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_CREATE_IDMAP */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKDIR_IDMAP */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKNOD_IDMAP */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->permission() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_PERMISSION_IDMAP */
/* iops->permission() takes struct user_namespace* */
/* #undef HAVE_IOPS_PERMISSION_USERNS */
/* iops->rename() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_RENAME_IDMAP */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->setattr() exists */
/* #undef HAVE_IOPS_SETATTR */
/* iops->symlink() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_SYMLINK_IDMAP */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* iter_iov() is available */
/* #undef HAVE_ITER_IOV */
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/internal.h */
/* #undef HAVE_KERNEL_FPU_INTERNAL_HEADER */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_KERNEL_INFINITE_RECURSION */
+/* kernel has kernel_neon_* functions */
+/* #undef HAVE_KERNEL_NEON */
+
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
+/* strlcpy() exists */
+/* #undef HAVE_KERNEL_STRLCPY */
+
+/* strscpy() exists */
+/* #undef HAVE_KERNEL_STRSCPY */
+
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* linux/blk-cgroup.h exists */
/* #undef HAVE_LINUX_BLK_CGROUP_HEADER */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* part_to_dev() exists */
/* #undef HAVE_PART_TO_DEV */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and existed */
/* #undef HAVE_QAT */
/* struct reclaim_state has reclaimed */
/* #undef HAVE_RECLAIM_STATE_RECLAIMED */
/* register_shrinker is vararg */
/* #undef HAVE_REGISTER_SHRINKER_VARARG */
/* register_sysctl_table exists */
/* #undef HAVE_REGISTER_SYSCTL_TABLE */
/* iops->rename2() exists */
/* #undef HAVE_RENAME2 */
/* struct inode_operations_wrapper takes .rename2() */
/* #undef HAVE_RENAME2_OPERATIONS_WRAPPER */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() accepts mnt_idmap */
/* #undef HAVE_SETATTR_PREPARE_IDMAP */
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args, arg1 is struct mnt_idmap * */
/* #undef HAVE_SET_ACL_IDMAP_DENTRY */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* iops->set_acl() takes 4 args, arg2 is struct dentry * */
/* #undef HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
+/* shrinker_register exists */
+/* #undef HAVE_SHRINKER_REGISTER */
+
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* cs->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
+/* have super_block s_shrink */
+/* #undef HAVE_SUPER_BLOCK_S_SHRINK */
+
+/* have super_block s_shrink pointer */
+/* #undef HAVE_SUPER_BLOCK_S_SHRINK_PTR */
+
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* sync_blockdev() is declared in include/blkdev.h */
/* #undef HAVE_SYNC_BLOCKDEV */
/* struct kobj_type has default_groups */
/* #undef HAVE_SYSFS_DEFAULT_GROUPS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() uses old dentry signature */
/* #undef HAVE_TMPFILE_DENTRY */
/* i_op->tmpfile() has mnt_idmap */
/* #undef HAVE_TMPFILE_IDMAP */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
/* user_namespace->ns.inum exists */
/* #undef HAVE_USER_NS_COMMON_INUM */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* fops->clone_file_range() is available */
/* #undef HAVE_VFS_CLONE_FILE_RANGE */
/* fops->copy_file_range() is available */
/* #undef HAVE_VFS_COPY_FILE_RANGE */
/* fops->dedupe_file_range() is available */
/* #undef HAVE_VFS_DEDUPE_FILE_RANGE */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* filemap_dirty_folio exists */
/* #undef HAVE_VFS_FILEMAP_DIRTY_FOLIO */
/* file_operations_extend takes .copy_file_range() and .clone_file_range() */
/* #undef HAVE_VFS_FILE_OPERATIONS_EXTEND */
/* generic_copy_file_range() is available */
/* #undef HAVE_VFS_GENERIC_COPY_FILE_RANGE */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* address_space_operations->readpages exists */
/* #undef HAVE_VFS_READPAGES */
/* read_folio exists */
/* #undef HAVE_VFS_READ_FOLIO */
/* fops->remap_file_range() is available */
/* #undef HAVE_VFS_REMAP_FILE_RANGE */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants dentry and inode and flags */
/* #undef HAVE_XATTR_GET_DENTRY_INODE_FLAGS */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes mnt_idmap */
/* #undef HAVE_XATTR_SET_IDMAP */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if host toolchain supports XSAVE */
#define HAVE_XSAVE 1
/* Define if host toolchain supports XSAVEOPT */
#define HAVE_XSAVEOPT 1
/* Define if host toolchain supports XSAVES */
#define HAVE_XSAVES 1
/* ZERO_PAGE() is GPL-only */
/* #undef HAVE_ZERO_PAGE_GPL_ONLY */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* using complete_and_exit() instead */
/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* pde_data() is PDE_DATA() */
/* #undef SPL_PDE_DATA */
/* Define to 1 if all of the C90 standard headers exist (not just the ones
 required in a freestanding environment). This macro is provided for
 backward compatibility; new code need not use it. */
#define STDC_HEADERS 1
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* GENHD_FL_EXT_DEVT flag is not available */
/* #undef ZFS_GENHD_FL_EXT_DEVT */
/* GENHD_FL_NO_PART_SCAN flag is available */
/* #undef ZFS_GENHD_FL_NO_PART */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.2.2-FreeBSD_g494aaaed8"
+#define ZFS_META_ALIAS "zfs-2.2.3-FreeBSD_gc883088df"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.6"
+#define ZFS_META_KVER_MAX "6.7"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_g494aaaed8"
+#define ZFS_META_RELEASE "FreeBSD_gc883088df"
/* Define the project version. */
-#define ZFS_META_VERSION "2.2.2"
+#define ZFS_META_VERSION "2.2.3"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
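Nearly every Linux kernel-interface probe above is left undefined, as expected for the FreeBSD copy of this generated header. One quick way to list what configure did detect (a sketch; the path assumes a FreeBSD source tree):

    grep -E '^#define HAVE_' sys/modules/zfs/zfs_config.h | sort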
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 82745ed74145..70b3c3310d8f 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.2.2-0-g494aaaed8"
+#define ZFS_META_GITREV "zfs-2.2.3-0-gc883088df"
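The alias and gitrev strings surface in userland output, e.g. (the shape of the output is illustrative):

    zfs version
    #   zfs-2.2.3-FreeBSD_gc883088df
    #   zfs-kmod-2.2.3-FreeBSD_gc883088df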
