diff --git a/Makefile.am b/Makefile.am index 30c61dda27c4..c44d64df0187 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,263 +1,261 @@ ACLOCAL_AMFLAGS = -I config SUBDIRS = include if BUILD_LINUX SUBDIRS += rpm endif if CONFIG_USER SUBDIRS += etc man scripts lib tests cmd contrib if BUILD_LINUX SUBDIRS += udev endif endif if CONFIG_KERNEL SUBDIRS += module extradir = $(prefix)/src/zfs-$(VERSION) extra_HEADERS = zfs.release.in zfs_config.h.in if BUILD_LINUX kerneldir = $(prefix)/src/zfs-$(VERSION)/$(LINUX_VERSION) nodist_kernel_HEADERS = zfs.release zfs_config.h module/$(LINUX_SYMBOLS) endif endif AUTOMAKE_OPTIONS = foreign EXTRA_DIST = autogen.sh copy-builtin EXTRA_DIST += config/config.awk config/rpm.am config/deb.am config/tgz.am EXTRA_DIST += AUTHORS CODE_OF_CONDUCT.md COPYRIGHT LICENSE META NEWS NOTICE EXTRA_DIST += README.md RELEASES.md EXTRA_DIST += module/lua/README.zfs module/os/linux/spl/README.md # Include all the extra licensing information for modules EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE.descrip EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.cryptogams EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.cryptogams.descrip EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.openssl EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.openssl.descrip EXTRA_DIST += module/os/linux/spl/THIRDPARTYLICENSE.gplv2 EXTRA_DIST += module/os/linux/spl/THIRDPARTYLICENSE.gplv2.descrip EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash.descrip @CODE_COVERAGE_RULES@ GITREV = include/zfs_gitrev.h PHONY = gitrev gitrev: 
$(AM_V_GEN)$(top_srcdir)/scripts/make_gitrev.sh $(GITREV) all: gitrev # Double-colon rules are allowed; there are multiple independent definitions. maintainer-clean-local:: -$(RM) $(GITREV) distclean-local:: -$(RM) -R autom4te*.cache build -find . \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \ -o -name .pc -o -name .hg -o -name .git \) -prune -o \ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ -o -name '.*.rej' -o -size 0 -o -name '*%' -o -name '.*.cmd' \ -o -name 'core' -o -name 'Makefile' -o -name 'Module.symvers' \ -o -name '*.order' -o -name '*.markers' -o -name '*.gcda' \ -o -name '*.gcno' \) \ -type f -print | xargs $(RM) all-local: -[ -x ${top_builddir}/scripts/zfs-tests.sh ] && \ ${top_builddir}/scripts/zfs-tests.sh -c dist-hook: $(AM_V_GEN)$(top_srcdir)/scripts/make_gitrev.sh -D $(distdir) $(GITREV) $(SED) ${ac_inplace} -e 's/Release:[[:print:]]*/Release: $(RELEASE)/' \ $(distdir)/META if BUILD_LINUX # For compatibility, create a matching spl-x.y.z directly which contains # symlinks to the updated header and object file locations. These # compatibility links will be removed in the next major release. 
if CONFIG_KERNEL install-data-hook: rm -rf $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \ mkdir $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \ cd $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \ ln -s ../zfs-$(VERSION)/include/spl include && \ ln -s ../zfs-$(VERSION)/$(LINUX_VERSION) $(LINUX_VERSION) && \ ln -s ../zfs-$(VERSION)/zfs_config.h.in spl_config.h.in && \ ln -s ../zfs-$(VERSION)/zfs.release.in spl.release.in && \ cd $(DESTDIR)$(prefix)/src/zfs-$(VERSION)/$(LINUX_VERSION) && \ ln -fs zfs_config.h spl_config.h && \ ln -fs zfs.release spl.release endif endif PHONY += codecheck codecheck: cstyle shellcheck checkbashisms flake8 mancheck testscheck vcscheck PHONY += checkstyle checkstyle: codecheck commitcheck PHONY += commitcheck commitcheck: @if git rev-parse --git-dir > /dev/null 2>&1; then \ ${top_srcdir}/scripts/commitcheck.sh; \ fi PHONY += cstyle cstyle: @find ${top_srcdir} -name build -prune \ -o -type f -name '*.[hc]' \ ! -name 'zfs_config.*' ! -name '*.mod.c' \ ! -name 'opt_global.h' ! -name '*_if*.h' \ ! 
-path './module/zstd/lib/*' \ -exec ${top_srcdir}/scripts/cstyle.pl -cpP {} \+ filter_executable = -exec test -x '{}' \; -print PHONY += shellcheck shellcheck: @if type shellcheck > /dev/null 2>&1; then \ shellcheck --exclude=SC1090 --exclude=SC1117 --format=gcc \ $$(find ${top_srcdir}/scripts/*.sh -type f) \ $$(find ${top_srcdir}/cmd/zed/zed.d/*.sh -type f) \ $$(find ${top_srcdir}/cmd/zpool/zpool.d/* \ -type f ${filter_executable}); \ else \ echo "skipping shellcheck because shellcheck is not installed"; \ fi PHONY += checkabi storeabi checkabi: lib $(MAKE) -C lib checkabi storeabi: lib $(MAKE) -C lib storeabi PHONY += checkbashisms checkbashisms: @if type checkbashisms > /dev/null 2>&1; then \ checkbashisms -n -p -x \ $$(find ${top_srcdir} \ -name '.git' -prune \ -o -name 'build' -prune \ -o -name 'tests' -prune \ -o -name 'config' -prune \ -o -name 'zed-functions.sh*' -prune \ -o -name 'zfs-import*' -prune \ -o -name 'zfs-mount*' -prune \ -o -name 'zfs-zed*' -prune \ -o -name 'smart' -prune \ -o -name 'paxcheck.sh' -prune \ -o -name 'make_gitrev.sh' -prune \ -o -name '90zfs' -prune \ -o -type f ! -name 'config*' \ ! 
-name 'libtool' \ -exec sh -c 'awk "NR==1 && /#!.*bin\/sh.*/ {print FILENAME;}" "{}"' \;); \ else \ echo "skipping checkbashisms because checkbashisms is not installed"; \ fi PHONY += mancheck mancheck: @if type mandoc > /dev/null 2>&1; then \ - find ${top_srcdir}/man/man8 -type f -name 'zfs.8' \ - -o -name 'zpool.8' -o -name 'zdb.8' \ - -o -name 'zgenhostid.8' | \ - xargs mandoc -Tlint -Werror; \ + find ${top_srcdir}/man/man8 -type f -name '*[1-9]*' \ + -exec mandoc -Tlint -Werror {} \+; \ else \ echo "skipping mancheck because mandoc is not installed"; \ fi if BUILD_LINUX stat_fmt = -c '%A %n' else stat_fmt = -f '%Sp %N' endif PHONY += testscheck testscheck: @find ${top_srcdir}/tests/zfs-tests -type f \ \( -name '*.ksh' -not ${filter_executable} \) -o \ \( -name '*.kshlib' ${filter_executable} \) -o \ \( -name '*.shlib' ${filter_executable} \) -o \ \( -name '*.cfg' ${filter_executable} \) | \ xargs -r stat ${stat_fmt} | \ awk '{c++; print} END {if(c>0) exit 1}' PHONY += vcscheck vcscheck: @if git rev-parse --git-dir > /dev/null 2>&1; then \ git ls-files . 
--exclude-standard --others | \ awk '{c++; print} END {if(c>0) exit 1}' ; \ fi PHONY += lint lint: cppcheck paxcheck CPPCHECKDIRS = cmd lib module PHONY += cppcheck cppcheck: $(CPPCHECKDIRS) @if test -n "$(CPPCHECK)"; then \ set -e ; for dir in $(CPPCHECKDIRS) ; do \ $(MAKE) -C $$dir cppcheck ; \ done \ else \ echo "skipping cppcheck because cppcheck is not installed"; \ fi PHONY += paxcheck paxcheck: @if type scanelf > /dev/null 2>&1; then \ ${top_srcdir}/scripts/paxcheck.sh ${top_builddir}; \ else \ echo "skipping paxcheck because scanelf is not installed"; \ fi PHONY += flake8 flake8: @if type flake8 > /dev/null 2>&1; then \ flake8 ${top_srcdir}; \ else \ echo "skipping flake8 because flake8 is not installed"; \ fi PHONY += ctags ctags: $(RM) tags find $(top_srcdir) -name '.?*' -prune \ -o -type f -name '*.[hcS]' -print | xargs ctags -a PHONY += etags etags: $(RM) TAGS find $(top_srcdir) -name '.?*' -prune \ -o -type f -name '*.[hcS]' -print | xargs etags -a PHONY += cscopelist cscopelist: find $(top_srcdir) -name '.?*' -prune \ -o -type f -name '*.[hc]' -print >cscope.files PHONY += tags tags: ctags etags PHONY += pkg pkg-dkms pkg-kmod pkg-utils pkg: @DEFAULT_PACKAGE@ pkg-dkms: @DEFAULT_PACKAGE@-dkms pkg-kmod: @DEFAULT_PACKAGE@-kmod pkg-utils: @DEFAULT_PACKAGE@-utils include config/rpm.am include config/deb.am include config/tgz.am .PHONY: $(PHONY) diff --git a/man/man8/zfs-mount-generator.8.in b/man/man8/zfs-mount-generator.8.in index 3b8c9c3ae246..96fee4291071 100644 --- a/man/man8/zfs-mount-generator.8.in +++ b/man/man8/zfs-mount-generator.8.in @@ -1,248 +1,247 @@ .\" .\" Copyright 2018 Antonio Russo .\" Copyright 2019 Kjeld Schouten-Lebbing .\" Copyright 2020 InsanePrawn .\" .\" Permission is hereby granted, free of charge, to any person obtaining .\" a copy of this software and associated documentation files (the .\" "Software"), to deal in the Software without restriction, including .\" without limitation the rights to use, copy, modify, merge, publish, 
.\" distribute, sublicense, and/or sell copies of the Software, and to .\" permit persons to whom the Software is furnished to do so, subject to .\" the following conditions: .\" .\" The above copyright notice and this permission notice shall be .\" included in all copies or substantial portions of the Software. .\" .\" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, .\" EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF .\" MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND .\" NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE .\" LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION .\" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION .\" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. .TH ZFS-MOUNT-GENERATOR 8 "Aug 24, 2020" OpenZFS .SH "NAME" zfs\-mount\-generator \- generates systemd mount units for ZFS .SH SYNOPSIS .B @systemdgeneratordir@/zfs\-mount\-generator .sp .SH DESCRIPTION zfs\-mount\-generator implements the \fBGenerators Specification\fP of .BR systemd (1), and is called during early boot to generate .BR systemd.mount (5) units for automatically mounted datasets. Mount ordering and dependencies are created for all tracked pools (see below). .SS ENCRYPTION KEYS If the dataset is an encryption root, a service that loads the associated key (either from file or through a .BR systemd\-ask\-password (1) prompt) will be created. This service . BR RequiresMountsFor the path of the key (if file-based) and also copies the mount unit's .BR After , .BR Before and .BR Requires . All mount units of encrypted datasets add the key\-load service for their encryption root to their .BR Wants and .BR After . The service will not be .BR Want ed or .BR Require d by .BR local-fs.target directly, and so will only be started manually or as a dependency of a started mount unit. 
.SS UNIT ORDERING AND DEPENDENCIES mount unit's .BR Before \-> key\-load service (if any) \-> mount unit \-> mount unit's .BR After It is worth nothing that when a mount unit is activated, it activates all available mount units for parent paths to its mountpoint, i.e. activating the mount unit for /tmp/foo/1/2/3 automatically activates all available mount units for /tmp, /tmp/foo, /tmp/foo/1, and /tmp/foo/1/2. This is true for any combination of mount units from any sources, not just ZFS. .SS CACHE FILE Because ZFS pools may not be available very early in the boot process, information on ZFS mountpoints must be stored separately. The output of the command .PP .RS 4 zfs list -H -o name,mountpoint,canmount,atime,relatime,devices,exec,readonly,setuid,nbmand,encroot,keylocation,org.openzfs.systemd:requires,org.openzfs.systemd:requires-mounts-for,org.openzfs.systemd:before,org.openzfs.systemd:after,org.openzfs.systemd:wanted-by,org.openzfs.systemd:required-by,org.openzfs.systemd:nofail,org.openzfs.systemd:ignore .RE .PP for datasets that should be mounted by systemd, should be kept separate from the pool, at .PP .RS 4 .RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME . .RE .PP The cache file, if writeable, will be kept synchronized with the pool state by the ZEDLET .PP .RS 4 history_event-zfs-list-cacher.sh . .RE .PP .sp .SS PROPERTIES The behavior of the generator script can be influenced by the following dataset properties: .sp .TP 4 .BR canmount = on | off | noauto If a dataset has .BR mountpoint set and .BR canmount is not .BR off , a mount unit will be generated. Additionally, if .BR canmount is .BR on , .BR local-fs.target will gain a dependency on the mount unit. This behavior is equal to the .BR auto and .BR noauto legacy mount options, see .BR systemd.mount (5). Encryption roots always generate a key-load service, even for .BR canmount=off . .TP 4 .BR org.openzfs.systemd:requires\-mounts\-for = \fIpath\fR... 
Space\-separated list of mountpoints to require to be mounted for this mount unit .TP 4 .BR org.openzfs.systemd:before = \fIunit\fR... The mount unit and associated key\-load service will be ordered before this space\-separated list of units. .TP 4 .BR org.openzfs.systemd:after = \fIunit\fR... The mount unit and associated key\-load service will be ordered after this space\-separated list of units. .TP 4 .BR org.openzfs.systemd:wanted\-by = \fIunit\fR... Space-separated list of units that will gain a .BR Wants dependency on this mount unit. Setting this property implies .BR noauto . .TP 4 .BR org.openzfs.systemd:required\-by = \fIunit\fR... Space-separated list of units that will gain a .BR Requires dependency on this mount unit. Setting this property implies .BR noauto . .TP 4 .BR org.openzfs.systemd:nofail = unset | on | off Toggles between a .BR Wants and .BR Requires type of dependency between the mount unit and .BR local-fs.target , if .BR noauto isn't set or implied. .BR on : Mount will be .BR WantedBy local-fs.target .BR off : Mount will be .BR Before and .BR RequiredBy local-fs.target .BR unset : Mount will be .BR Before and .BR WantedBy local-fs.target .TP 4 .BR org.openzfs.systemd:ignore = on | off If set to .BR on , do not generate a mount unit for this dataset. -.RE See also .BR systemd.mount (5) .PP .SH EXAMPLE To begin, enable tracking for the pool: .PP .RS 4 touch .RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME .RE .PP Then, enable the tracking ZEDLET: .PP .RS 4 ln -s "@zfsexecdir@/zed.d/history_event-zfs-list-cacher.sh" "@sysconfdir@/zfs/zed.d" systemctl enable zfs-zed.service systemctl restart zfs-zed.service .RE .PP Force the running of the ZEDLET by setting a monitored property, e.g. .BR canmount , for at least one dataset in the pool: .PP .RS 4 zfs set canmount=on .I DATASET .RE .PP This forces an update to the stale cache file. To test the generator output, run .PP .RS 4 @systemdgeneratordir@/zfs-mount-generator /tmp/zfs-mount-generator . . 
.RE .PP This will generate units and dependencies in .I /tmp/zfs-mount-generator for you to inspect them. The second and third argument are ignored. If you're satisfied with the generated units, instruct systemd to re-run all generators: .PP .RS 4 systemctl daemon-reload .RE .PP .sp .SH SEE ALSO .BR zfs (5) .BR zfs-events (5) .BR zed (8) .BR zpool (5) .BR systemd (1) .BR systemd.target (5) .BR systemd.special (7) .BR systemd.mount (7) diff --git a/man/man8/zfs-program.8 b/man/man8/zfs-program.8 index de708e12cec2..02251ae7cbad 100644 --- a/man/man8/zfs-program.8 +++ b/man/man8/zfs-program.8 @@ -1,635 +1,636 @@ .\" This file and its contents are supplied under the terms of the .\" Common Development and Distribution License ("CDDL"), version 1.0. .\" You may only use this file in accordance with the terms of version .\" 1.0 of the CDDL. .\" .\" A full copy of the text of the CDDL should have accompanied this .\" source. A copy of the CDDL is also available via the Internet at .\" http://www.illumos.org/license/CDDL. .\" .\" .\" Copyright (c) 2016, 2019 by Delphix. All Rights Reserved. .\" Copyright (c) 2019, 2020 by Christian Schwarz. All Rights Reserved. .\" Copyright 2020 Joyent, Inc. .\" .Dd January 26, 2021 .Dt ZFS-PROGRAM 8 .Os .Sh NAME .Nm zfs-program .Nd executes ZFS channel programs .Sh SYNOPSIS .Nm zfs .Cm program .Op Fl jn .Op Fl t Ar instruction-limit .Op Fl m Ar memory-limit .Ar pool .Ar script .\".Op Ar optional arguments to channel program .Sh DESCRIPTION The ZFS channel program interface allows ZFS administrative operations to be run programmatically as a Lua script. The entire script is executed atomically, with no other administrative operations taking effect concurrently. A library of ZFS calls is made available to channel program scripts. Channel programs may only be run with root privileges. .Pp A modified version of the Lua 5.2 interpreter is used to run channel program scripts. 
The Lua 5.2 manual can be found at: .Bd -centered -offset indent .Lk http://www.lua.org/manual/5.2/ .Ed .Pp The channel program given by .Ar script will be run on .Ar pool , and any attempts to access or modify other pools will cause an error. .Sh OPTIONS .Bl -tag -width "-t" .It Fl j Display channel program output in JSON format. When this flag is specified and standard output is empty - channel program encountered an error. The details of such an error will be printed to standard error in plain text. .It Fl n Executes a read-only channel program, which runs faster. The program cannot change on-disk state by calling functions from the zfs.sync submodule. The program can be used to gather information such as properties and determining if changes would succeed (zfs.check.*). Without this flag, all pending changes must be synced to disk before a channel program can complete. .It Fl t Ar instruction-limit Limit the number of Lua instructions to execute. If a channel program executes more than the specified number of instructions, it will be stopped and an error will be returned. The default limit is 10 million instructions, and it can be set to a maximum of 100 million instructions. .It Fl m Ar memory-limit Memory limit, in bytes. If a channel program attempts to allocate more memory than the given limit, it will be stopped and an error returned. The default memory limit is 10 MB, and can be set to a maximum of 100 MB. .El .Pp All remaining argument strings will be passed directly to the Lua script as described in the .Sx LUA INTERFACE section below. .Sh LUA INTERFACE A channel program can be invoked either from the command line, or via a library call to .Fn lzc_channel_program . .Ss Arguments Arguments passed to the channel program are converted to a Lua table. If invoked from the command line, extra arguments to the Lua script will be accessible as an array stored in the argument table with the key 'argv': .Bd -literal -offset indent args = ... 
argv = args["argv"] -- argv == {1="arg1", 2="arg2", ...} .Ed .Pp If invoked from the libZFS interface, an arbitrary argument list can be passed to the channel program, which is accessible via the same "..." syntax in Lua: .Bd -literal -offset indent args = ... -- args == {"foo"="bar", "baz"={...}, ...} .Ed .Pp Note that because Lua arrays are 1-indexed, arrays passed to Lua from the libZFS interface will have their indices incremented by 1. That is, the element in .Va arr[0] in a C array passed to a channel program will be stored in .Va arr[1] when accessed from Lua. .Ss Return Values Lua return statements take the form: .Bd -literal -offset indent return ret0, ret1, ret2, ... .Ed .Pp Return statements returning multiple values are permitted internally in a channel program script, but attempting to return more than one value from the top level of the channel program is not permitted and will throw an error. However, tables containing multiple values can still be returned. If invoked from the command line, a return statement: .Bd -literal -offset indent a = {foo="bar", baz=2} return a .Ed .Pp Will be output formatted as: .Bd -literal -offset indent Channel program fully executed with return value: return: baz: 2 foo: 'bar' .Ed .Ss Fatal Errors If the channel program encounters a fatal error while running, a non-zero exit status will be returned. If more information about the error is available, a singleton list will be returned detailing the error: .Bd -literal -offset indent error: "error string, including Lua stack trace" .Ed .Pp If a fatal error is returned, the channel program may have not executed at all, may have partially executed, or may have fully executed but failed to pass a return value back to userland. .Pp If the channel program exhausts an instruction or memory limit, a fatal error will be generated and the program will be stopped, leaving the program partially executed. No attempt is made to reverse or undo any operations already performed. 
Note that because both the instruction count and amount of memory used by a channel program are deterministic when run against the same inputs and filesystem state, as long as a channel program has run successfully once, you can guarantee that it will finish successfully against a similar size system. .Pp If a channel program attempts to return too large a value, the program will fully execute but exit with a nonzero status code and no return value. .Pp .Em Note : ZFS API functions do not generate Fatal Errors when correctly invoked, they return an error code and the channel program continues executing. See the .Sx ZFS API section below for function-specific details on error return codes. .Ss Lua to C Value Conversion When invoking a channel program via the libZFS interface, it is necessary to translate arguments and return values from Lua values to their C equivalents, and vice-versa. .Pp There is a correspondence between nvlist values in C and Lua tables. A Lua table which is returned from the channel program will be recursively converted to an nvlist, with table values converted to their natural equivalents: .Bd -literal -offset indent string -> string number -> int64 boolean -> boolean_value nil -> boolean (no value) table -> nvlist .Ed .Pp Likewise, table keys are replaced by string equivalents as follows: .Bd -literal -offset indent string -> no change number -> signed decimal string ("%lld") boolean -> "true" | "false" .Ed .Pp Any collision of table key strings (for example, the string "true" and a true boolean value) will cause a fatal error. .Pp Lua numbers are represented internally as signed 64-bit integers. 
.Sh LUA STANDARD LIBRARY The following Lua built-in base library functions are available: .Bd -literal -offset indent assert rawlen collectgarbage rawget error rawset getmetatable select ipairs setmetatable next tonumber pairs tostring rawequal type .Ed .Pp All functions in the .Em coroutine , .Em string , and .Em table built-in submodules are also available. A complete list and documentation of these modules is available in the Lua manual. .Pp The following functions base library functions have been disabled and are not available for use in channel programs: .Bd -literal -offset indent dofile loadfile load pcall print xpcall .Ed .Sh ZFS API .Ss Function Arguments Each API function takes a fixed set of required positional arguments and optional keyword arguments. For example, the destroy function takes a single positional string argument (the name of the dataset to destroy) and an optional "defer" keyword boolean argument. When using parentheses to specify the arguments to a Lua function, only positional arguments can be used: .Bd -literal -offset indent zfs.sync.destroy("rpool@snap") .Ed .Pp To use keyword arguments, functions must be called with a single argument that is a Lua table containing entries mapping integers to positional arguments and strings to keyword arguments: .Bd -literal -offset indent zfs.sync.destroy({1="rpool@snap", defer=true}) .Ed .Pp The Lua language allows curly braces to be used in place of parenthesis as syntactic sugar for this calling convention: .Bd -literal -offset indent zfs.sync.snapshot{"rpool@snap", defer=true} .Ed .Ss Function Return Values If an API function succeeds, it returns 0. If it fails, it returns an error code and the channel program continues executing. API functions do not generate Fatal Errors except in the case of an unrecoverable internal file system error. .Pp In addition to returning an error code, some functions also return extra details describing what caused the error. 
This extra description is given as a second return value, and will always be a Lua table, or Nil if no error details were returned. Different keys will exist in the error details table depending on the function and error case. Any such function may be called expecting a single return value: .Bd -literal -offset indent errno = zfs.sync.promote(dataset) .Ed .Pp Or, the error details can be retrieved: .Bd -literal -offset indent errno, details = zfs.sync.promote(dataset) if (errno == EEXIST) then assert(details ~= Nil) list_of_conflicting_snapshots = details end .Ed .Pp The following global aliases for API function error return codes are defined for use in channel programs: .Bd -literal -offset indent EPERM ECHILD ENODEV ENOSPC ENOENT EAGAIN ENOTDIR ESPIPE ESRCH ENOMEM EISDIR EROFS EINTR EACCES EINVAL EMLINK EIO EFAULT ENFILE EPIPE ENXIO ENOTBLK EMFILE EDOM E2BIG EBUSY ENOTTY ERANGE ENOEXEC EEXIST ETXTBSY EDQUOT EBADF EXDEV EFBIG .Ed .Ss API Functions For detailed descriptions of the exact behavior of any zfs administrative operations, see the main .Xr zfs 8 manual page. .Bl -tag -width "xx" .It Em zfs.debug(msg) Record a debug message in the zfs_dbgmsg log. A log of these messages can be printed via mdb's "::zfs_dbgmsg" command, or can be monitored live by running: .Bd -literal -offset indent dtrace -n 'zfs-dbgmsg{trace(stringof(arg0))}' .Ed .Pp msg (string) .Bd -ragged -compact -offset "xxxx" Debug message to be printed. .Ed .It Em zfs.exists(dataset) Returns true if the given dataset exists, or false if it doesn't. A fatal error will be thrown if the dataset is not in the target pool. That is, in a channel program running on rpool, zfs.exists("rpool/nonexistent_fs") returns false, but zfs.exists("somepool/fs_that_may_exist") will error. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Dataset to check for existence. Must be in the target pool. .Ed .It Em zfs.get_prop(dataset, property) Returns two values. 
First, a string, number or table containing the property value for the given dataset. Second, a string containing the source of the property (i.e. the name of the dataset in which it was set or nil if it is readonly). Throws a Lua error if the dataset is invalid or the property doesn't exist. Note that Lua only supports int64 number types whereas ZFS number properties are uint64. This means very large values (like guid) may wrap around and appear negative. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Filesystem or snapshot path to retrieve properties from. .Ed .Pp property (string) .Bd -ragged -compact -offset "xxxx" Name of property to retrieve. All filesystem, snapshot and volume properties are supported except for 'mounted' and 'iscsioptions.' Also supports the 'written@snap' and 'written#bookmark' properties and the '@id' properties, though the id must be in numeric form. .Ed .El .Bl -tag -width "xx" .It Sy zfs.sync submodule The sync submodule contains functions that modify the on-disk state. They are executed in "syncing context". .Pp The available sync submodule functions are as follows: .Bl -tag -width "xx" .It Em zfs.sync.destroy(dataset, [defer=true|false]) Destroy the given dataset. Returns 0 on successful destroy, or a nonzero error code if the dataset could not be destroyed (for example, if the dataset has any active children or clones). .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Filesystem or snapshot to be destroyed. .Ed .Pp [optional] defer (boolean) .Bd -ragged -compact -offset "xxxx" Valid only for destroying snapshots. If set to true, and the snapshot has holds or clones, allows the snapshot to be marked for deferred deletion rather than failing. .Ed .It Em zfs.sync.inherit(dataset, property) Clears the specified property in the given dataset, causing it to be inherited from an ancestor, or restored to the default if no ancestor property is set. The .Ql zfs inherit -S option has not been implemented. 
Returns 0 on success, or a nonzero error code if the property could not be cleared. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Filesystem or snapshot containing the property to clear. .Ed .Pp property (string) .Bd -ragged -compact -offset "xxxx" The property to clear. Allowed properties are the same as those for the .Nm zfs Cm inherit command. .Ed .It Em zfs.sync.promote(dataset) Promote the given clone to a filesystem. Returns 0 on successful promotion, or a nonzero error code otherwise. If EEXIST is returned, the second return value will be an array of the clone's snapshots whose names collide with snapshots of the parent filesystem. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Clone to be promoted. .Ed .It Em zfs.sync.rollback(filesystem) Rollback to the previous snapshot for a dataset. Returns 0 on successful rollback, or a nonzero error code otherwise. Rollbacks can be performed on filesystems or zvols, but not on snapshots or mounted datasets. EBUSY is returned in the case where the filesystem is mounted. .Pp filesystem (string) .Bd -ragged -compact -offset "xxxx" Filesystem to rollback. .Ed .It Em zfs.sync.set_prop(dataset, property, value) Sets the given property on a dataset. Currently only user properties are supported. Returns 0 if the property was set, or a nonzero error code otherwise. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" The dataset where the property will be set. .Ed .Pp property (string) .Bd -ragged -compact -offset "xxxx" The property to set. Only user properties are supported. .Ed .Pp value (string) .Bd -ragged -compact -offset "xxxx" The value of the property to be set. .Ed .It Em zfs.sync.snapshot(dataset) Create a snapshot of a filesystem. Returns 0 if the snapshot was successfully created, and a nonzero error code otherwise. .Pp Note: Taking a snapshot will fail on any pool older than legacy version 27. To enable taking snapshots from ZCP scripts, the pool must be upgraded. 
.Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Name of snapshot to create. .Ed .It Em zfs.sync.bookmark(source, newbookmark) Create a bookmark of an existing source snapshot or bookmark. Returns 0 if the new bookmark was successfully created, and a nonzero error code otherwise. .Pp Note: Bookmarking requires the corresponding pool feature to be enabled. .Pp source (string) .Bd -ragged -compact -offset "xxxx" Full name of the existing snapshot or bookmark. .Ed .Pp newbookmark (string) .Bd -ragged -compact -offset "xxxx" Full name of the new bookmark. .El +.Ed .It Sy zfs.check submodule For each function in the zfs.sync submodule, there is a corresponding zfs.check function which performs a "dry run" of the same operation. Each takes the same arguments as its zfs.sync counterpart and returns 0 if the operation would succeed, or a non-zero error code if it would fail, along with any other error details. That is, each has the same behavior as the corresponding sync function except for actually executing the requested change. For example, .Em zfs.check.destroy("fs") returns 0 if .Em zfs.sync.destroy("fs") would successfully destroy the dataset. .Pp The available zfs.check functions are: .Bl -tag -width "xx" .It Em zfs.check.destroy(dataset, [defer=true|false]) .It Em zfs.check.promote(dataset) .It Em zfs.check.rollback(filesystem) .It Em zfs.check.set_property(dataset, property, value) .It Em zfs.check.snapshot(dataset) .El .It Sy zfs.list submodule The zfs.list submodule provides functions for iterating over datasets and properties. Rather than returning tables, these functions act as Lua iterators, and are generally used as follows: .Bd -literal -offset indent for child in zfs.list.children("rpool") do ... end .Ed .Pp The available zfs.list functions are: .Bl -tag -width "xx" .It Em zfs.list.clones(snapshot) Iterate through all clones of the given snapshot. 
.Pp snapshot (string) .Bd -ragged -compact -offset "xxxx" Must be a valid snapshot path in the current pool. .Ed .It Em zfs.list.snapshots(dataset) Iterate through all snapshots of the given dataset. Each snapshot is returned as a string containing the full dataset name, e.g. "pool/fs@snap". .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem or volume. .Ed .It Em zfs.list.children(dataset) Iterate through all direct children of the given dataset. Each child is returned as a string containing the full dataset name, e.g. "pool/fs/child". .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem or volume. .Ed .It Em zfs.list.bookmarks(dataset) Iterate through all bookmarks of the given dataset. Each bookmark is returned as a string containing the full dataset name, e.g. "pool/fs#bookmark". .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem or volume. .Ed .It Em zfs.list.holds(snapshot) Iterate through all user holds on the given snapshot. Each hold is returned as a pair of the hold's tag and the timestamp (in seconds since the epoch) at which it was created. .Pp snapshot (string) .Bd -ragged -compact -offset "xxxx" Must be a valid snapshot. .Ed .It Em zfs.list.properties(dataset) An alias for zfs.list.user_properties (see relevant entry). .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem, snapshot, or volume. .Ed .It Em zfs.list.user_properties(dataset) Iterate through all user properties for the given dataset. For each step of the iteration, output the property name, its value, and its source. Throws a Lua error if the dataset is invalid. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem, snapshot, or volume. .Ed .It Em zfs.list.system_properties(dataset) Returns an array of strings, the names of the valid system (non-user defined) properties for the given dataset. 
Throws a Lua error if the dataset is invalid. .Pp dataset (string) .Bd -ragged -compact -offset "xxxx" Must be a valid filesystem, snapshot, or volume. .Ed .El .El .Sh EXAMPLES .Ss Example 1 The following channel program recursively destroys a filesystem and all its snapshots and children in a naive manner. Note that this does not involve any error handling or reporting. .Bd -literal -offset indent function destroy_recursive(root) for child in zfs.list.children(root) do destroy_recursive(child) end for snap in zfs.list.snapshots(root) do zfs.sync.destroy(snap) end zfs.sync.destroy(root) end destroy_recursive("pool/somefs") .Ed .Ss Example 2 A more verbose and robust version of the same channel program, which properly detects and reports errors, and also takes the dataset to destroy as a command line argument, would be as follows: .Bd -literal -offset indent succeeded = {} failed = {} function destroy_recursive(root) for child in zfs.list.children(root) do destroy_recursive(child) end for snap in zfs.list.snapshots(root) do err = zfs.sync.destroy(snap) if (err ~= 0) then failed[snap] = err else succeeded[snap] = err end end err = zfs.sync.destroy(root) if (err ~= 0) then failed[root] = err else succeeded[root] = err end end args = ... argv = args["argv"] destroy_recursive(argv[1]) results = {} results["succeeded"] = succeeded results["failed"] = failed return results .Ed .Ss Example 3 The following function performs a forced promote operation by attempting to promote the given clone and destroying any conflicting snapshots. .Bd -literal -offset indent function force_promote(ds) errno, details = zfs.check.promote(ds) if (errno == EEXIST) then assert(details ~= nil) for i, snap in ipairs(details) do zfs.sync.destroy(ds .. "@" .. 
snap) end elseif (errno ~= 0) then return errno end return zfs.sync.promote(ds) end .Ed diff --git a/man/man8/zfs-wait.8 b/man/man8/zfs-wait.8 index de78a509293d..9213fa20f1ff 100644 --- a/man/man8/zfs-wait.8 +++ b/man/man8/zfs-wait.8 @@ -1,71 +1,70 @@ .\" .\" CDDL HEADER START .\" .\" The contents of this file are subject to the terms of the .\" Common Development and Distribution License (the "License"). .\" You may not use this file except in compliance with the License. .\" .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE .\" or http://www.opensolaris.org/os/licensing. .\" See the License for the specific language governing permissions .\" and limitations under the License. .\" .\" When distributing Covered Code, include this CDDL HEADER in each .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. .\" If applicable, add the following below this CDDL HEADER, with the .\" fields enclosed by brackets "[]" replaced with your own identifying .\" information: Portions Copyright [yyyy] [name of copyright owner] .\" .\" CDDL HEADER END .\" .\" .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved. .\" Copyright (c) 2012, 2018 by Delphix. All rights reserved. .\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved. .\" Copyright (c) 2017 Datto Inc. .\" Copyright (c) 2018 George Melikov. All Rights Reserved. .\" Copyright 2017 Nexenta Systems, Inc. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. .\" .Dd August 9, 2019 .Dt ZFS-WAIT 8 .Os .Sh NAME .Nm zfs-wait .Nd Wait for background activity to stop in a ZFS filesystem .Sh SYNOPSIS .Nm zfs .Cm wait .Op Fl t Ar activity Ns Oo , Ns Ar activity Ns Oc Ns ... .Ar fs .Sh DESCRIPTION .Bl -tag -width Ds .It Xo .Nm zfs .Cm wait .Op Fl t Ar activity Ns Oo , Ns Ar activity Ns Oc Ns ... .Ar fs .Xc Waits until all background activity of the given types has ceased in the given filesystem. 
The activity could cease because it has completed or because the filesystem has been destroyed or unmounted. If no activities are specified, the command waits until background activity of every type listed below has ceased. If there is no activity of the given types in progress, the command returns immediately. .Pp These are the possible values for .Ar activity , along with what each one waits for: .Bd -literal deleteq The filesystem's internal delete queue to empty .Ed .Pp Note that the internal delete queue does not finish draining until all large files have had time to be fully destroyed and all open file handles to unlinked files are closed. .El -.El .Sh SEE ALSO .Xr lsof 8 diff --git a/man/man8/zfs_ids_to_path.8 b/man/man8/zfs_ids_to_path.8 index 9d6a4976efa2..4f7b8429e411 100644 --- a/man/man8/zfs_ids_to_path.8 +++ b/man/man8/zfs_ids_to_path.8 @@ -1,50 +1,50 @@ .\" .\" CDDL HEADER START .\" .\" The contents of this file are subject to the terms of the .\" Common Development and Distribution License (the "License"). .\" You may not use this file except in compliance with the License. .\" .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE .\" or http://www.opensolaris.org/os/licensing. .\" See the License for the specific language governing permissions .\" and limitations under the License. .\" .\" When distributing Covered Code, include this CDDL HEADER in each .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. .\" If applicable, add the following below this CDDL HEADER, with the .\" fields enclosed by brackets "[]" replaced with your own identifying .\" information: Portions Copyright [yyyy] [name of copyright owner] .\" .\" CDDL HEADER END .\" .\" .\" Copyright (c) 2020 by Delphix. All rights reserved. 
.Dd April 17, 2020 .Dt ZFS_IDS_TO_PATH 8 .Os .Sh NAME .Nm zfs_ids_to_path .Nd convert objset and object ids to names and paths .Sh SYNOPSIS .Nm .Op Fl v .Ar pool .Ar objset id .Ar object id .Nm .Sh DESCRIPTION .Pp -.LP The .Sy zfs_ids_to_path utility converts a provided objset and object id into a path to the file that those ids refer to. .Bl -tag -width "-D" .It Fl v Verbose. Print the dataset name and the file path within the dataset separately. This will work correctly even if the dataset is not mounted. +.El .Sh SEE ALSO .Xr zfs 8 , .Xr zdb 8 diff --git a/man/man8/zstream.8 b/man/man8/zstream.8 index 6056e097b0ef..2912f10c6913 100644 --- a/man/man8/zstream.8 +++ b/man/man8/zstream.8 @@ -1,110 +1,111 @@ .\" .\" CDDL HEADER START .\" .\" The contents of this file are subject to the terms of the .\" Common Development and Distribution License (the "License"). .\" You may not use this file except in compliance with the License. .\" .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE .\" or http://www.opensolaris.org/os/licensing. .\" See the License for the specific language governing permissions .\" and limitations under the License. .\" .\" When distributing Covered Code, include this CDDL HEADER in each .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. .\" If applicable, add the following below this CDDL HEADER, with the .\" fields enclosed by brackets "[]" replaced with your own identifying .\" information: Portions Copyright [yyyy] [name of copyright owner] .\" .\" CDDL HEADER END .\" .\" .\" Copyright (c) 2020 by Delphix. All rights reserved. .Dd March 25, 2020 .Dt ZSTREAM 8 .Os .Sh NAME .Nm zstream .Nd manipulate zfs send streams .Sh SYNOPSIS .Nm .Cm dump .Op Fl Cvd .Op Ar file .Nm .Cm redup .Op Fl v .Ar file .Nm .Cm token .Ar resume_token .Sh DESCRIPTION .sp -.LP The .Sy zstream utility manipulates zfs send streams, which are the output of the .Sy zfs send command. 
.Bl -tag -width "" .It Xo .Nm .Cm dump .Op Fl Cvd .Op Ar file .Xc Print information about the specified send stream, including headers and record counts. The send stream may either be in the specified .Ar file , or provided on standard input. .Bl -tag -width "-D" .It Fl C Suppress the validation of checksums. .It Fl v Verbose. Print metadata for each record. .It Fl d Dump data contained in each record. Implies verbose. .El .It Xo .Nm .Cm token .Ar resume_token .Xc Dumps zfs resume token information .It Xo .Nm .Cm redup .Op Fl v .Ar file .Xc Deduplicated send streams can be generated by using the .Nm zfs Cm send Fl D command. The ability to send deduplicated send streams is deprecated. In the future, the ability to receive a deduplicated send stream with .Nm zfs Cm receive will be removed. However, deduplicated send streams can still be received by utilizing .Nm zstream Cm redup . .Pp The .Nm zstream Cm redup command is provided a .Ar file containing a deduplicated send stream, and outputs an equivalent non-deduplicated send stream on standard output. Therefore, a deduplicated send stream can be received by running: .Bd -literal # zstream redup DEDUP_STREAM_FILE | zfs receive ... .Ed .Bl -tag -width "-D" .It Fl v Verbose. Print summary of converted records. +.El +.El .Sh SEE ALSO .Xr zfs 8 , .Xr zfs-send 8 , .Xr zfs-receive 8