diff --git a/contrib/pyzfs/libzfs_core/_constants.py b/contrib/pyzfs/libzfs_core/_constants.py index 917feee015ae..55de55d42286 100644 --- a/contrib/pyzfs/libzfs_core/_constants.py +++ b/contrib/pyzfs/libzfs_core/_constants.py @@ -1,70 +1,71 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Important `libzfs_core` constants. """ from __future__ import absolute_import, division, print_function # https://stackoverflow.com/a/1695250 def enum(*sequential, **named): enums = dict(((b, a) for a, b in enumerate(sequential)), **named) return type('Enum', (), enums) #: Maximum length of any ZFS name. MAXNAMELEN = 255 #: Default channel program limits ZCP_DEFAULT_INSTRLIMIT = 10 * 1000 * 1000 ZCP_DEFAULT_MEMLIMIT = 10 * 1024 * 1024 #: Encryption wrapping key length WRAPPING_KEY_LEN = 32 #: Encryption key location enum zfs_key_location = enum( 'ZFS_KEYLOCATION_NONE', 'ZFS_KEYLOCATION_PROMPT', 'ZFS_KEYLOCATION_URI' ) #: Encryption key format enum zfs_keyformat = enum( 'ZFS_KEYFORMAT_NONE', 'ZFS_KEYFORMAT_RAW', 'ZFS_KEYFORMAT_HEX', 'ZFS_KEYFORMAT_PASSPHRASE' ) # Encryption algorithms enum zio_encrypt = enum( 'ZIO_CRYPT_INHERIT', 'ZIO_CRYPT_ON', 'ZIO_CRYPT_OFF', 'ZIO_CRYPT_AES_128_CCM', 'ZIO_CRYPT_AES_192_CCM', 'ZIO_CRYPT_AES_256_CCM', 'ZIO_CRYPT_AES_128_GCM', 'ZIO_CRYPT_AES_192_GCM', 'ZIO_CRYPT_AES_256_GCM' ) # ZFS-specific error codes ZFS_ERR_CHECKPOINT_EXISTS = 1024 ZFS_ERR_DISCARDING_CHECKPOINT = 1025 ZFS_ERR_NO_CHECKPOINT = 1026 ZFS_ERR_DEVRM_IN_PROGRESS = 1027 ZFS_ERR_VDEV_TOO_BIG = 1028 +ZFS_ERR_WRONG_PARENT = 1033 # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/contrib/pyzfs/libzfs_core/_error_translation.py b/contrib/pyzfs/libzfs_core/_error_translation.py index b5f4bebce152..b888fd7255bb 100644 --- a/contrib/pyzfs/libzfs_core/_error_translation.py +++ b/contrib/pyzfs/libzfs_core/_error_translation.py @@ -1,817 +1,823 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Helper routines for converting ``errno`` style error codes from C functions to Python exceptions defined by `libzfs_core` API. The conversion heavily depends on the context of the error: the attempted operation and the input parameters. For this reason, there is a conversion routine for each `libzfs_core` interface function. The conversion routines have the return code as a parameter as well as all the parameters of the corresponding interface functions. The parameters and exceptions are documented in the `libzfs_core` interfaces. 
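For illustration, a typical wrapper in `_libzfs_core.py` first calls the C function and then hands the return code, together with the original arguments, to the matching translation routine; the ``lzc_create`` wrapper, for example, does::

    ret = _lib.lzc_create(name, ds_type, nvlist, key, len(key))
    errors.lzc_create_translate_error(ret, name, ds_type, props)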
""" from __future__ import absolute_import, division, print_function import errno import re import string from . import exceptions as lzc_exc from ._constants import ( MAXNAMELEN, ZFS_ERR_CHECKPOINT_EXISTS, ZFS_ERR_DISCARDING_CHECKPOINT, ZFS_ERR_NO_CHECKPOINT, ZFS_ERR_DEVRM_IN_PROGRESS, - ZFS_ERR_VDEV_TOO_BIG + ZFS_ERR_VDEV_TOO_BIG, + ZFS_ERR_WRONG_PARENT ) def lzc_create_translate_error(ret, name, ds_type, props): if ret == 0: return if ret == errno.EINVAL: - # XXX: should raise lzc_exc.WrongParent if parent is ZVOL _validate_fs_name(name) raise lzc_exc.PropertyInvalid(name) if ret == errno.EEXIST: raise lzc_exc.FilesystemExists(name) if ret == errno.ENOENT: raise lzc_exc.ParentNotFound(name) + if ret == ZFS_ERR_WRONG_PARENT: + raise lzc_exc.WrongParent(_fs_name(name)) raise _generic_exception(ret, name, "Failed to create filesystem") def lzc_clone_translate_error(ret, name, origin, props): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) _validate_snap_name(origin) raise lzc_exc.PropertyInvalid(name) if ret == errno.EXDEV: raise lzc_exc.PoolsDiffer(name) if ret == errno.EEXIST: raise lzc_exc.FilesystemExists(name) if ret == errno.ENOENT: if not _is_valid_snap_name(origin): raise lzc_exc.SnapshotNameInvalid(origin) raise lzc_exc.DatasetNotFound(name) raise _generic_exception(ret, name, "Failed to create clone") def lzc_rollback_translate_error(ret, name): if ret == 0: return if ret == errno.ESRCH: raise lzc_exc.SnapshotNotFound(name) if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.NameInvalid(name) if ret == errno.ENOENT: if not _is_valid_fs_name(name): raise lzc_exc.NameInvalid(name) else: raise lzc_exc.FilesystemNotFound(name) raise _generic_exception(ret, name, "Failed to rollback") def lzc_rollback_to_translate_error(ret, name, snap): if ret == errno.EEXIST: raise lzc_exc.SnapshotNotLatest(snap) else: lzc_rollback_translate_error(ret, name) def lzc_snapshot_translate_errors(ret, errlist, snaps, props): if ret == 0: return def _map(ret, name): if ret == errno.EXDEV: pool_names = iter(map(_pool_name, snaps)) pool_name = next(pool_names, None) same_pool = all(x == pool_name for x in pool_names) if same_pool: return lzc_exc.DuplicateSnapshots(name) else: return lzc_exc.PoolsDiffer(name) elif ret == errno.EINVAL: if any(not _is_valid_snap_name(s) for s in snaps): return lzc_exc.NameInvalid(name) elif any(len(s) > MAXNAMELEN for s in snaps): return lzc_exc.NameTooLong(name) else: return lzc_exc.PropertyInvalid(name) if ret == errno.EEXIST: return lzc_exc.SnapshotExists(name) if ret == errno.ENOENT: return lzc_exc.FilesystemNotFound(name) return _generic_exception(ret, name, "Failed to create snapshot") _handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotFailure, _map) def lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer): if ret == 0: return def _map(ret, name): if ret == errno.EEXIST: return lzc_exc.SnapshotIsCloned(name) if ret == errno.ENOENT: return lzc_exc.PoolNotFound(name) if ret == errno.EBUSY: return lzc_exc.SnapshotIsHeld(name) return _generic_exception(ret, name, "Failed to destroy snapshot") _handle_err_list( ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map) def lzc_bookmark_translate_errors(ret, errlist, bookmarks): if ret == 0: return def _map(ret, name): if ret == errno.EINVAL: if name: snap = bookmarks[name] pool_names = map(_pool_name, bookmarks.keys()) if not _is_valid_bmark_name(name): return lzc_exc.BookmarkNameInvalid(name) elif not _is_valid_snap_name(snap): return lzc_exc.SnapshotNameInvalid(snap) elif 
_fs_name(name) != _fs_name(snap): return lzc_exc.BookmarkMismatch(name) elif any(x != _pool_name(name) for x in pool_names): return lzc_exc.PoolsDiffer(name) else: invalid_names = [ b for b in bookmarks.keys() if not _is_valid_bmark_name(b)] if invalid_names: return lzc_exc.BookmarkNameInvalid(invalid_names[0]) if ret == errno.EEXIST: return lzc_exc.BookmarkExists(name) if ret == errno.ENOENT: return lzc_exc.SnapshotNotFound(name) if ret == errno.ENOTSUP: return lzc_exc.BookmarkNotSupported(name) return _generic_exception(ret, name, "Failed to create bookmark") _handle_err_list( ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map) def lzc_get_bookmarks_translate_error(ret, fsname, props): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(fsname) raise _generic_exception(ret, fsname, "Failed to list bookmarks") def lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks): if ret == 0: return def _map(ret, name): if ret == errno.EINVAL: return lzc_exc.NameInvalid(name) return _generic_exception(ret, name, "Failed to destroy bookmark") _handle_err_list( ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map) def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap): if ret == 0: return if ret == errno.EXDEV and firstsnap is not None: if _pool_name(firstsnap) != _pool_name(lastsnap): raise lzc_exc.PoolsDiffer(lastsnap) else: raise lzc_exc.SnapshotMismatch(lastsnap) if ret == errno.EINVAL: if not _is_valid_snap_name(firstsnap): raise lzc_exc.NameInvalid(firstsnap) elif not _is_valid_snap_name(lastsnap): raise lzc_exc.NameInvalid(lastsnap) elif len(firstsnap) > MAXNAMELEN: raise lzc_exc.NameTooLong(firstsnap) elif len(lastsnap) > MAXNAMELEN: raise lzc_exc.NameTooLong(lastsnap) elif _pool_name(firstsnap) != _pool_name(lastsnap): raise lzc_exc.PoolsDiffer(lastsnap) else: raise lzc_exc.SnapshotMismatch(lastsnap) if ret == errno.ENOENT: raise lzc_exc.SnapshotNotFound(lastsnap) raise _generic_exception( ret, lastsnap, "Failed to calculate space used by range of snapshots") def lzc_hold_translate_errors(ret, errlist, holds, fd): if ret == 0: return def _map(ret, name): if ret == errno.EXDEV: return lzc_exc.PoolsDiffer(name) elif ret == errno.EINVAL: if name: pool_names = map(_pool_name, holds.keys()) if not _is_valid_snap_name(name): return lzc_exc.NameInvalid(name) elif len(name) > MAXNAMELEN: return lzc_exc.NameTooLong(name) elif any(x != _pool_name(name) for x in pool_names): return lzc_exc.PoolsDiffer(name) else: invalid_names = [ b for b in holds.keys() if not _is_valid_snap_name(b)] if invalid_names: return lzc_exc.NameInvalid(invalid_names[0]) fs_name = None hold_name = None pool_name = None if name is not None: fs_name = _fs_name(name) pool_name = _pool_name(name) hold_name = holds[name] if ret == errno.ENOENT: return lzc_exc.FilesystemNotFound(fs_name) if ret == errno.EEXIST: return lzc_exc.HoldExists(name) if ret == errno.E2BIG: return lzc_exc.NameTooLong(hold_name) if ret == errno.ENOTSUP: return lzc_exc.FeatureNotSupported(pool_name) return _generic_exception(ret, name, "Failed to hold snapshot") if ret == errno.EBADF: raise lzc_exc.BadHoldCleanupFD() _handle_err_list(ret, errlist, holds.keys(), lzc_exc.HoldFailure, _map) def lzc_release_translate_errors(ret, errlist, holds): if ret == 0: return for snap in holds: hold_list = holds[snap] if not isinstance(hold_list, list): raise TypeError('holds must be in a list') def _map(ret, name): if ret == errno.EXDEV: return lzc_exc.PoolsDiffer(name) elif ret == errno.EINVAL: if
name: pool_names = map(_pool_name, holds.keys()) if not _is_valid_snap_name(name): return lzc_exc.NameInvalid(name) elif len(name) > MAXNAMELEN: return lzc_exc.NameTooLong(name) elif any(x != _pool_name(name) for x in pool_names): return lzc_exc.PoolsDiffer(name) else: invalid_names = [ b for b in holds.keys() if not _is_valid_snap_name(b)] if invalid_names: return lzc_exc.NameInvalid(invalid_names[0]) elif ret == errno.ENOENT: return lzc_exc.HoldNotFound(name) elif ret == errno.E2BIG: tag_list = holds[name] too_long_tags = [t for t in tag_list if len(t) > MAXNAMELEN] return lzc_exc.NameTooLong(too_long_tags[0]) elif ret == errno.ENOTSUP: pool_name = None if name is not None: pool_name = _pool_name(name) return lzc_exc.FeatureNotSupported(pool_name) else: return _generic_exception( ret, name, "Failed to release snapshot hold") _handle_err_list( ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map) def lzc_get_holds_translate_error(ret, snapname): if ret == 0: return if ret == errno.EINVAL: _validate_snap_name(snapname) if ret == errno.ENOENT: raise lzc_exc.SnapshotNotFound(snapname) if ret == errno.ENOTSUP: raise lzc_exc.FeatureNotSupported(_pool_name(snapname)) raise _generic_exception(ret, snapname, "Failed to get holds on snapshot") def lzc_send_translate_error(ret, snapname, fromsnap, fd, flags): if ret == 0: return if ret == errno.EXDEV and fromsnap is not None: if _pool_name(fromsnap) != _pool_name(snapname): raise lzc_exc.PoolsDiffer(snapname) else: raise lzc_exc.SnapshotMismatch(snapname) elif ret == errno.EINVAL: if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and not _is_valid_bmark_name(fromsnap)): raise lzc_exc.NameInvalid(fromsnap) elif (not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname)): raise lzc_exc.NameInvalid(snapname) elif fromsnap is not None and len(fromsnap) > MAXNAMELEN: raise lzc_exc.NameTooLong(fromsnap) elif len(snapname) > MAXNAMELEN: raise lzc_exc.NameTooLong(snapname) elif (fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname)): raise lzc_exc.PoolsDiffer(snapname) elif ret == errno.ENOENT: if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and not _is_valid_bmark_name(fromsnap)): raise lzc_exc.NameInvalid(fromsnap) raise lzc_exc.SnapshotNotFound(snapname) elif ret == errno.ENAMETOOLONG: if fromsnap is not None and len(fromsnap) > MAXNAMELEN: raise lzc_exc.NameTooLong(fromsnap) else: raise lzc_exc.NameTooLong(snapname) raise lzc_exc.StreamIOError(ret) def lzc_send_space_translate_error(ret, snapname, fromsnap): if ret == 0: return if ret == errno.EXDEV and fromsnap is not None: if _pool_name(fromsnap) != _pool_name(snapname): raise lzc_exc.PoolsDiffer(snapname) else: raise lzc_exc.SnapshotMismatch(snapname) elif ret == errno.EINVAL: if fromsnap is not None and not _is_valid_snap_name(fromsnap): raise lzc_exc.NameInvalid(fromsnap) elif not _is_valid_snap_name(snapname): raise lzc_exc.NameInvalid(snapname) elif fromsnap is not None and len(fromsnap) > MAXNAMELEN: raise lzc_exc.NameTooLong(fromsnap) elif len(snapname) > MAXNAMELEN: raise lzc_exc.NameTooLong(snapname) elif (fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname)): raise lzc_exc.PoolsDiffer(snapname) elif ret == errno.ENOENT and fromsnap is not None: if not _is_valid_snap_name(fromsnap): raise lzc_exc.NameInvalid(fromsnap) if ret == errno.ENOENT: raise lzc_exc.SnapshotNotFound(snapname) raise _generic_exception( ret, snapname, "Failed to estimate backup stream size") def lzc_receive_translate_errors( ret, snapname, 
fd, force, raw, resumable, embedded, origin, properrs ): if ret == 0: if properrs is not None and len(properrs) > 0: def _map(ret, name): if ret == errno.EINVAL: return lzc_exc.PropertyInvalid(name) return _generic_exception(ret, name, "Failed to set property") _handle_err_list( errno.EINVAL, properrs, [snapname], lzc_exc.ReceivePropertyFailure, _map) else: return if ret == errno.EINVAL: if (not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname)): raise lzc_exc.NameInvalid(snapname) elif len(snapname) > MAXNAMELEN: raise lzc_exc.NameTooLong(snapname) elif origin is not None and not _is_valid_snap_name(origin): raise lzc_exc.NameInvalid(origin) elif resumable: raise lzc_exc.StreamFeatureInvalid() elif embedded and not raw: raise lzc_exc.StreamFeatureIncompatible() else: raise lzc_exc.BadStream() if ret == errno.ENOENT: if not _is_valid_snap_name(snapname): raise lzc_exc.NameInvalid(snapname) else: raise lzc_exc.DatasetNotFound(snapname) if ret == errno.EEXIST: raise lzc_exc.DatasetExists(snapname) if ret == errno.ENOTSUP: raise lzc_exc.StreamFeatureNotSupported() if ret == errno.ENODEV: raise lzc_exc.StreamMismatch(_fs_name(snapname)) if ret == errno.ETXTBSY: raise lzc_exc.DestinationModified(_fs_name(snapname)) if ret == errno.EBUSY: raise lzc_exc.DatasetBusy(_fs_name(snapname)) if ret == errno.ENOSPC: raise lzc_exc.NoSpace(_fs_name(snapname)) if ret == errno.EDQUOT: raise lzc_exc.QuotaExceeded(_fs_name(snapname)) if ret == errno.ENAMETOOLONG: raise lzc_exc.NameTooLong(snapname) if ret == errno.EROFS: raise lzc_exc.ReadOnlyPool(_pool_name(snapname)) if ret == errno.EAGAIN: raise lzc_exc.SuspendedPool(_pool_name(snapname)) if ret == errno.EBADE: # ECKSUM raise lzc_exc.BadStream() + if ret == ZFS_ERR_WRONG_PARENT: + raise lzc_exc.WrongParent(_fs_name(snapname)) raise lzc_exc.StreamIOError(ret) def lzc_promote_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.NotClone(name) if ret == errno.ENOTSOCK: raise lzc_exc.NotClone(name) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(name) if ret == errno.EEXIST: raise lzc_exc.SnapshotExists(name) raise _generic_exception(ret, name, "Failed to promote dataset") def lzc_change_key_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.PropertyInvalid(name) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(name) if ret == errno.EACCES: raise lzc_exc.EncryptionKeyNotLoaded() raise _generic_exception(ret, name, "Failed to change encryption key") def lzc_load_key_translate_error(ret, name, noop): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.PropertyInvalid(name) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(name) if ret == errno.EACCES: raise lzc_exc.EncryptionKeyInvalid() if ret == errno.EEXIST: raise lzc_exc.EncryptionKeyAlreadyLoaded() if noop: raise _generic_exception(ret, name, "Failed to load encryption key") else: raise _generic_exception(ret, name, "Failed to verify encryption key") def lzc_unload_key_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.PropertyInvalid(name) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(name) if ret == errno.EACCES: raise lzc_exc.EncryptionKeyNotLoaded() raise _generic_exception(ret, name, "Failed to unload encryption key") def lzc_sync_translate_error(ret, name): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.PoolNotFound(name) raise 
_generic_exception(ret, name, "Failed to sync pool") def lzc_reopen_translate_error(ret, name): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.PoolNotFound(name) raise _generic_exception(ret, name, "Failed to reopen pool") def lzc_channel_program_translate_error(ret, name, error): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.PoolNotFound(name) if ret == errno.ETIME: raise lzc_exc.ZCPTimeout() if ret == errno.ENOMEM: raise lzc_exc.ZCPMemoryError() if ret == errno.ENOSPC: raise lzc_exc.ZCPSpaceError() if ret == errno.EPERM: raise lzc_exc.ZCPPermissionError() if ret == errno.ECHRNG: raise lzc_exc.ZCPRuntimeError(error) if ret == errno.EINVAL: if error is None: raise lzc_exc.ZCPLimitInvalid() else: raise lzc_exc.ZCPSyntaxError(error) raise _generic_exception(ret, name, "Failed to execute channel program") def lzc_remap_translate_error(ret, name): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.DatasetNotFound(name) if ret == errno.EINVAL: _validate_fs_name(name) if ret == errno.ENOTSUP: raise lzc_exc.FeatureNotSupported(name) raise _generic_exception(ret, name, "Failed to remap dataset") def lzc_pool_checkpoint_translate_error(ret, name, discard=False): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.PoolNotFound(name) if ret == ZFS_ERR_CHECKPOINT_EXISTS: raise lzc_exc.CheckpointExists() if ret == ZFS_ERR_NO_CHECKPOINT: raise lzc_exc.CheckpointNotFound() if ret == ZFS_ERR_DISCARDING_CHECKPOINT: raise lzc_exc.CheckpointDiscarding() if ret == ZFS_ERR_DEVRM_IN_PROGRESS: raise lzc_exc.DeviceRemovalRunning() if ret == ZFS_ERR_VDEV_TOO_BIG: raise lzc_exc.DeviceTooBig() if discard: raise _generic_exception( ret, name, "Failed to discard pool checkpoint") else: raise _generic_exception(ret, name, "Failed to create pool checkpoint") def lzc_pool_checkpoint_discard_translate_error(ret, name): lzc_pool_checkpoint_translate_error(ret, name, discard=True) def lzc_rename_translate_error(ret, source, target): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(source) _validate_fs_name(target) if _pool_name(source) != _pool_name(target): raise lzc_exc.PoolsDiffer(source) if ret == errno.EEXIST: raise lzc_exc.FilesystemExists(target) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(source) + if ret == ZFS_ERR_WRONG_PARENT: + raise lzc_exc.WrongParent(target) raise _generic_exception(ret, source, "Failed to rename dataset") def lzc_destroy_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) if ret == errno.ENOENT: raise lzc_exc.FilesystemNotFound(name) raise _generic_exception(ret, name, "Failed to destroy dataset") def lzc_inherit_prop_translate_error(ret, name, prop): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise lzc_exc.PropertyInvalid(prop) if ret == errno.ENOENT: raise lzc_exc.DatasetNotFound(name) raise _generic_exception(ret, name, "Failed to inherit a property") def lzc_set_prop_translate_error(ret, name, prop, val): if ret == 0: return if ret == errno.EINVAL: _validate_fs_or_snap_name(name) raise lzc_exc.PropertyInvalid(prop) if ret == errno.ENOENT: raise lzc_exc.DatasetNotFound(name) raise _generic_exception(ret, name, "Failed to set a property") def lzc_get_props_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_or_snap_name(name) if ret == errno.ENOENT: raise lzc_exc.DatasetNotFound(name) raise _generic_exception(ret, name, "Failed to get properties") def lzc_list_children_translate_error(ret, name): if ret == 0: return if ret ==
errno.EINVAL: _validate_fs_name(name) raise _generic_exception(ret, name, "Error while iterating children") def lzc_list_snaps_translate_error(ret, name): if ret == 0: return if ret == errno.EINVAL: _validate_fs_name(name) raise _generic_exception(ret, name, "Error while iterating snapshots") def lzc_list_translate_error(ret, name, opts): if ret == 0: return if ret == errno.ENOENT: raise lzc_exc.DatasetNotFound(name) if ret == errno.EINVAL: _validate_fs_or_snap_name(name) raise _generic_exception(ret, name, "Error obtaining a list") def _handle_err_list(ret, errlist, names, exception, mapper): ''' Convert one or more errors from an operation into the requested exception. :param int ret: the overall return code. :param errlist: the dictionary that maps entity names to their specific error codes. :type errlist: dict of bytes:int :param names: the list of all names of the entities on which the operation was attempted. :param type exception: the type of the exception to raise if an error occurred. The exception should be a subclass of ``MultipleOperationsFailure``. :param function mapper: the function that maps an error code and a name to a Python exception. Unless ``ret`` is zero this function will raise the ``exception``. If the ``errlist`` is not empty, then the compound exception will contain a list of exceptions corresponding to each individual error code in the ``errlist``. Otherwise, the ``exception`` will contain a list with a single exception corresponding to the ``ret`` value. If the ``names`` list contains only one element, that is, the operation was attempted on a single entity, then the name of that entity is passed to the ``mapper``. If the operation was attempted on multiple entities, but the ``errlist`` is empty, then we can not know which entity caused the error and, thus, ``None`` is used as a name to signify that fact. .. note:: Note that the ``errlist`` can contain a special element with a key of "N_MORE_ERRORS". That element means that there were too many errors to place on the ``errlist``. Those errors are suppressed and only their count is provided as a value of the special ``N_MORE_ERRORS`` element. ''' if ret == 0: return if len(errlist) == 0: suppressed_count = 0 names = list(zip(names, range(2))) if len(names) == 1: name, _ = names[0] else: name = None errors = [mapper(ret, name)] else: errors = [] suppressed_count = errlist.pop('N_MORE_ERRORS', 0) for name in errlist: err = errlist[name] errors.append(mapper(err, name)) raise exception(errors, suppressed_count) def _pool_name(name): ''' Extract a pool name from the given dataset or bookmark name. '/' separates dataset name components. '@' separates a snapshot name from the rest of the dataset name. '#' separates a bookmark name from the rest of the dataset name. ''' return re.split(b'[/@#]', name, 1)[0] def _fs_name(name): ''' Extract a dataset name from the given snapshot or bookmark name. '@' separates a snapshot name from the rest of the dataset name. '#' separates a bookmark name from the rest of the dataset name. 
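For example, both ``_fs_name(b'pool/fs@snap')`` and ``_fs_name(b'pool/fs#bmark')`` return ``b'pool/fs'``.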
''' return re.split(b'[@#]', name, 1)[0] def _is_valid_name_component(component): allowed = string.ascii_letters + string.digits + u'-_.: ' return component and all(x in allowed.encode() for x in component) def _is_valid_fs_name(name): return name and all(_is_valid_name_component(c) for c in name.split(b'/')) def _is_valid_snap_name(name): parts = name.split(b'@') return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and _is_valid_name_component(parts[1])) def _is_valid_bmark_name(name): parts = name.split(b'#') return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and _is_valid_name_component(parts[1])) def _validate_fs_name(name): if not _is_valid_fs_name(name): raise lzc_exc.FilesystemNameInvalid(name) elif len(name) > MAXNAMELEN: raise lzc_exc.NameTooLong(name) def _validate_snap_name(name): if not _is_valid_snap_name(name): raise lzc_exc.SnapshotNameInvalid(name) elif len(name) > MAXNAMELEN: raise lzc_exc.NameTooLong(name) def _validate_bmark_name(name): if not _is_valid_bmark_name(name): raise lzc_exc.BookmarkNameInvalid(name) elif len(name) > MAXNAMELEN: raise lzc_exc.NameTooLong(name) def _validate_fs_or_snap_name(name): if not _is_valid_fs_name(name) and not _is_valid_snap_name(name): raise lzc_exc.NameInvalid(name) elif len(name) > MAXNAMELEN: raise lzc_exc.NameTooLong(name) def _generic_exception(err, name, message): if err in _error_to_exception: return _error_to_exception[err](name) else: return lzc_exc.ZFSGenericError(err, message, name) _error_to_exception = {e.errno: e for e in [ lzc_exc.ZIOError, lzc_exc.NoSpace, lzc_exc.QuotaExceeded, lzc_exc.DatasetBusy, lzc_exc.NameTooLong, lzc_exc.ReadOnlyPool, lzc_exc.SuspendedPool, lzc_exc.PoolsDiffer, lzc_exc.PropertyNotSupported, ]} # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/contrib/pyzfs/libzfs_core/_libzfs_core.py b/contrib/pyzfs/libzfs_core/_libzfs_core.py index 589926ba876f..5c8a1f5e690a 100644 --- a/contrib/pyzfs/libzfs_core/_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/_libzfs_core.py @@ -1,1966 +1,1970 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Python wrappers for libzfs_core interfaces. As a rule, there is a Python function for each C function. The signatures of the Python functions generally follow those of the functions, but the argument types are natural to Python. nvlists are wrapped as dictionaries or lists depending on their usage. Some parameters have default values depending on typical use for increased convenience. Output parameters are not used and return values are directly returned. Error conditions are signalled by exceptions rather than by integer error codes. """ from __future__ import absolute_import, division, print_function import errno import functools import fcntl import os import struct import threading from . import exceptions from . 
import _error_translation as errors from .bindings import libzfs_core from ._constants import ( # noqa: F401 MAXNAMELEN, ZCP_DEFAULT_INSTRLIMIT, ZCP_DEFAULT_MEMLIMIT, WRAPPING_KEY_LEN, zfs_key_location, zfs_keyformat, zio_encrypt ) from .ctypes import ( int32_t, uint64_t ) from ._nvlist import nvlist_in, nvlist_out def _uncommitted(depends_on=None): ''' Mark an API function as being an uncommitted extension that might not be available. :param function depends_on: the function that would be checked instead of a decorated function. For example, if the decorated function uses another uncommitted function. This decorator transforms a decorated function to raise :exc:`NotImplementedError` if the C libzfs_core library does not provide a function with the same name as the decorated function. The optional `depends_on` parameter can be provided if the decorated function does not directly call the C function but instead calls another Python function that follows the typical convention. One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that calls ``lzc_list`` in libzfs_core. This decorator is implemented using :func:`is_supported`. ''' def _uncommitted_decorator(func, depends_on=depends_on): @functools.wraps(func) def _f(*args, **kwargs): if not is_supported(_f): raise NotImplementedError(func.__name__) return func(*args, **kwargs) if depends_on is not None: _f._check_func = depends_on return _f return _uncommitted_decorator def lzc_create(name, ds_type='zfs', props=None, key=None): ''' Create a ZFS filesystem or a ZFS volume ("zvol"). :param bytes name: a name of the dataset to be created. :param str ds_type: the type of the dataset to be created, currently supported types are "zfs" (the default) for a filesystem and "zvol" for a volume. :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). :type props: dict of bytes:Any :param key: dataset encryption key data (empty by default). :type key: bytes :raises FilesystemExists: if a dataset with the given name already exists. :raises ParentNotFound: if a parent dataset of the requested dataset does not exist. :raises PropertyInvalid: if one or more of the specified properties is invalid or has an invalid type or value. :raises NameInvalid: if the name is not a valid dataset name. :raises NameTooLong: if the name is too long. :raises WrongParent: if the parent dataset of the requested dataset is not a filesystem (e.g. ZVOL) ''' if props is None: props = {} if key is None: key = b"" else: key = bytes(key) if ds_type == 'zfs': ds_type = _lib.DMU_OST_ZFS elif ds_type == 'zvol': ds_type = _lib.DMU_OST_ZVOL else: raise exceptions.DatasetTypeInvalid(ds_type) nvlist = nvlist_in(props) ret = _lib.lzc_create(name, ds_type, nvlist, key, len(key)) errors.lzc_create_translate_error(ret, name, ds_type, props) def lzc_clone(name, origin, props=None): ''' Clone a ZFS filesystem or a ZFS volume ("zvol") from a given snapshot. :param bytes name: a name of the dataset to be created. :param bytes origin: a name of the origin snapshot. :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). :type props: dict of bytes:Any :raises FilesystemExists: if a dataset with the given name already exists. :raises DatasetNotFound: if either a parent dataset of the requested dataset or the origin snapshot does not exist. :raises PropertyInvalid: if one or more of the specified properties is invalid or has an invalid type or value. :raises FilesystemNameInvalid: if the name is not a valid dataset name. 
:raises SnapshotNameInvalid: if the origin is not a valid snapshot name. :raises NameTooLong: if the name or the origin name is too long. :raises PoolsDiffer: if the clone and the origin have different pool names. .. note:: Because of a deficiency of the underlying C interface :exc:`.DatasetNotFound` can mean that either a parent filesystem of the target or the origin snapshot does not exist. It is currently impossible to distinguish between the cases. :func:`lzc_hold` can be used to check that the snapshot exists and ensure that it is not destroyed before cloning. ''' if props is None: props = {} nvlist = nvlist_in(props) ret = _lib.lzc_clone(name, origin, nvlist) errors.lzc_clone_translate_error(ret, name, origin, props) def lzc_rollback(name): ''' Roll back a filesystem or volume to its most recent snapshot. Note that the latest snapshot may change if a new one is concurrently created or the current one is destroyed. ``lzc_rollback_to`` can be used to roll back to a specific snapshot, provided that it is the latest one. :param bytes name: a name of the dataset to be rolled back. :return: a name of the most recent snapshot. :rtype: bytes :raises FilesystemNotFound: if the dataset does not exist. :raises SnapshotNotFound: if the dataset does not have any snapshots. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. ''' # Account for terminating NUL in C strings. snapnamep = _ffi.new('char[]', MAXNAMELEN + 1) ret = _lib.lzc_rollback(name, snapnamep, MAXNAMELEN + 1) errors.lzc_rollback_translate_error(ret, name) return _ffi.string(snapnamep) def lzc_rollback_to(name, snap): ''' Roll back this filesystem or volume to the specified snapshot, if possible. :param bytes name: a name of the dataset to be rolled back. :param bytes snap: a name of the snapshot to roll back to. :raises FilesystemNotFound: if the dataset does not exist. :raises SnapshotNotFound: if the dataset does not have any snapshots. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises SnapshotNotLatest: if the snapshot is not the latest. ''' ret = _lib.lzc_rollback_to(name, snap) errors.lzc_rollback_to_translate_error(ret, name, snap) def lzc_snapshot(snaps, props=None): ''' Create snapshots. All snapshots must be in the same pool. Optionally snapshot properties can be set on all snapshots. Currently only user properties (prefixed with "user:") are supported. Either all snapshots are successfully created, or none are created and an exception is raised. :param snaps: a list of names of snapshots to be created. :type snaps: list of bytes :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). :type props: dict of bytes:bytes :raises SnapshotFailure: if one or more snapshots could not be created. .. note:: :exc:`.SnapshotFailure` is a compound exception that provides at least one detailed error object in :attr:`SnapshotFailure.errors` `list`. .. warning:: The underlying implementation reports an individual, per-snapshot error only for the :exc:`.SnapshotExists` condition and *sometimes* for :exc:`.NameTooLong`. In all other cases a single error is reported without connection to any specific snapshot name(s).
This has the following implications: * if multiple error conditions are encountered, only one of them is reported * unless only one snapshot is requested, it is impossible to tell how many snapshots are problematic and what they are * only if there are no other error conditions :exc:`.SnapshotExists` is reported for all affected snapshots * :exc:`.NameTooLong` can behave either in the same way as :exc:`.SnapshotExists` or as all other exceptions. The former is the case where the full snapshot name exceeds the maximum allowed length but the short snapshot name (after '@') is within the limit. The latter is the case when the short name alone exceeds the maximum allowed length. ''' snaps_dict = {name: None for name in snaps} errlist = {} snaps_nvlist = nvlist_in(snaps_dict) if props is None: props = {} props_nvlist = nvlist_in(props) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_snapshot(snaps_nvlist, props_nvlist, errlist_nvlist) errors.lzc_snapshot_translate_errors(ret, errlist, snaps, props) lzc_snap = lzc_snapshot def lzc_destroy_snaps(snaps, defer): ''' Destroy snapshots. They must all be in the same pool. Snapshots that do not exist will be silently ignored. If 'defer' is not set, and a snapshot has user holds or clones, the destroy operation will fail and none of the snapshots will be destroyed. If 'defer' is set, and a snapshot has user holds or clones, it will be marked for deferred destruction, and will be destroyed when the last hold or clone is removed/destroyed. The operation succeeds if all snapshots were destroyed (or marked for later destruction if 'defer' is set) or didn't exist to begin with. :param snaps: a list of names of snapshots to be destroyed. :type snaps: list of bytes :param bool defer: whether to mark busy snapshots for deferred destruction rather than immediately failing. :raises SnapshotDestructionFailure: if one or more snapshots could not be destroyed. .. note:: :exc:`.SnapshotDestructionFailure` is a compound exception that provides at least one detailed error object in :attr:`SnapshotDestructionFailure.errors` `list`. A typical error is :exc:`SnapshotIsCloned` if `defer` is `False`. The snapshot names are validated quite loosely and invalid names are typically ignored as nonexistent snapshots. A snapshot name referring to a filesystem that doesn't exist is ignored. However, a non-existent pool name causes :exc:`PoolNotFound`. ''' snaps_dict = {name: None for name in snaps} errlist = {} snaps_nvlist = nvlist_in(snaps_dict) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_destroy_snaps(snaps_nvlist, defer, errlist_nvlist) errors.lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer) def lzc_bookmark(bookmarks): ''' Create bookmarks. :param bookmarks: a dict that maps names of wanted bookmarks to names of existing snapshots. :type bookmarks: dict of bytes to bytes :raises BookmarkFailure: if any of the bookmarks can not be created for any reason. The bookmarks `dict` maps from the name of the bookmark (e.g. :file:`{pool}/{fs}#{bmark}`) to the name of the snapshot (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and snapshots must be in the same pool. ''' errlist = {} nvlist = nvlist_in(bookmarks) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_bookmark(nvlist, errlist_nvlist) errors.lzc_bookmark_translate_errors(ret, errlist, bookmarks) def lzc_get_bookmarks(fsname, props=None): ''' Retrieve a listing of bookmarks for the given file system. :param bytes fsname: a name of the filesystem.
:param props: a `list` of properties that will be returned for each bookmark. :type props: list of bytes :return: a `dict` that maps the bookmarks' short names to their properties. :rtype: dict of bytes:dict :raises FilesystemNotFound: if the filesystem is not found. The following are valid properties on bookmarks: guid : integer globally unique identifier of the snapshot the bookmark refers to createtxg : integer txg when the snapshot the bookmark refers to was created creation : integer timestamp when the snapshot the bookmark refers to was created Any other properties passed in ``props`` are ignored without reporting any error. Values in the returned dictionary map the names of the requested properties to their respective values. ''' bmarks = {} if props is None: props = [] props_dict = {name: None for name in props} nvlist = nvlist_in(props_dict) with nvlist_out(bmarks) as bmarks_nvlist: ret = _lib.lzc_get_bookmarks(fsname, nvlist, bmarks_nvlist) errors.lzc_get_bookmarks_translate_error(ret, fsname, props) return bmarks def lzc_destroy_bookmarks(bookmarks): ''' Destroy bookmarks. :param bookmarks: a list of the bookmarks to be destroyed. The bookmarks are specified as :file:`{fs}#{bmark}`. :type bookmarks: list of bytes :raises BookmarkDestructionFailure: if any of the bookmarks could not be destroyed. The bookmarks must all be in the same pool. Bookmarks that do not exist will be silently ignored. This also includes the case where the filesystem component of the bookmark name does not exist. However, an invalid bookmark name will cause :exc:`.NameInvalid` error reported in :attr:`BookmarkDestructionFailure.errors`. Either all bookmarks that existed are destroyed or an exception is raised. ''' errlist = {} bmarks_dict = {name: None for name in bookmarks} nvlist = nvlist_in(bmarks_dict) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_destroy_bookmarks(nvlist, errlist_nvlist) errors.lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks) def lzc_snaprange_space(firstsnap, lastsnap): ''' Calculate the size of data referenced by snapshots in the inclusive range between the ``firstsnap`` and the ``lastsnap`` and not shared with any other datasets. :param bytes firstsnap: the name of the first snapshot in the range. :param bytes lastsnap: the name of the last snapshot in the range. :return: the calculated size, in bytes. :rtype: `int` or `long` :raises SnapshotNotFound: if either of the snapshots does not exist. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. :raises SnapshotMismatch: if ``firstsnap`` is not an ancestor snapshot of ``lastsnap``. :raises PoolsDiffer: if the snapshots belong to different pools. ``lzc_snaprange_space`` calculates the total size of blocks that exist because they are referenced only by one or more snapshots in the given range but no other dataset. In other words, this is the set of blocks that were born after the snapshot preceding ``firstsnap`` and that died before the snapshot following ``lastsnap``. Yet another interpretation is that the result of ``lzc_snaprange_space`` is the size of the space that would be freed if all snapshots in the range were destroyed. If the same snapshot is given as both ``firstsnap`` and ``lastsnap``, then ``lzc_snaprange_space`` calculates the space used by that snapshot alone.
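A minimal usage sketch (the pool, filesystem, and snapshot names here are hypothetical)::

    used = lzc_snaprange_space(b'pool/fs@first', b'pool/fs@last')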
''' valp = _ffi.new('uint64_t *') ret = _lib.lzc_snaprange_space(firstsnap, lastsnap, valp) errors.lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap) return int(valp[0]) def lzc_hold(holds, fd=None): ''' Create *user holds* on snapshots. If there is a hold on a snapshot, the snapshot can not be destroyed. (However, it can be marked for deletion by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).) :param holds: the dictionary of names of the snapshots to hold mapped to the hold names. :type holds: dict of bytes : bytes :param fd: if not None, then it must be the result of :func:`os.open` called as ``os.open("/dev/zfs", O_EXCL)``. :type fd: int or None :return: a list of the snapshots that do not exist. :rtype: list of bytes :raises HoldFailure: if a hold was impossible on one or more of the snapshots. :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor associated with :file:`/dev/zfs`. The snapshots must all be in the same pool. If ``fd`` is not None, then when the ``fd`` is closed (including on process termination), the holds will be released. If the system is shut down uncleanly, the holds will be released when the pool is next opened or imported. Holds for snapshots which don't exist will be skipped and have an entry added to the return value, but will not cause an overall failure. No exception is raised if all holds, for snapshots that existed, were successfully created. Otherwise a :exc:`.HoldFailure` exception is raised and no holds will be created. :attr:`.HoldFailure.errors` may contain a single element for an error that is not specific to any hold / snapshot, or it may contain one or more elements detailing a specific error for each affected hold. ''' errlist = {} if fd is None: fd = -1 nvlist = nvlist_in(holds) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_hold(nvlist, fd, errlist_nvlist) errors.lzc_hold_translate_errors(ret, errlist, holds, fd) # If there is no error (no exception raised by _handle_err_list), but errlist # is not empty, then it contains missing snapshots. assert all(errlist[x] == errno.ENOENT for x in errlist) return list(errlist.keys()) def lzc_release(holds): ''' Release *user holds* on snapshots. If the snapshot has been marked for deferred destroy (by ``lzc_destroy_snaps(defer=True)``), it does not have any clones, and all the user holds are removed, then the snapshot will be destroyed. The snapshots must all be in the same pool. :param holds: a ``dict`` where keys are snapshot names and values are lists of hold tags to remove. :type holds: dict of bytes : list of bytes :return: a list of any snapshots that do not exist and of any tags that do not exist for existing snapshots. Such tags are qualified with a corresponding snapshot name using the following format :file:`{pool}/{fs}@{snap}#{tag}` :rtype: list of bytes :raises HoldReleaseFailure: if one or more existing holds could not be released. Holds which failed to release because they didn't exist will have an entry added to errlist, but will not cause an overall failure. This call succeeds if ``holds`` was empty or if all holds that existed were successfully removed. Otherwise an exception will be raised.
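A minimal usage sketch (the snapshot and hold tag names here are hypothetical); the return value lists any holds that did not exist::

    missing = lzc_release({b'pool/fs@snap': [b'my-tag']})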
''' errlist = {} holds_dict = {} for snap in holds: hold_list = holds[snap] if not isinstance(hold_list, list): raise TypeError('holds must be in a list') holds_dict[snap] = {hold: None for hold in hold_list} nvlist = nvlist_in(holds_dict) with nvlist_out(errlist) as errlist_nvlist: ret = _lib.lzc_release(nvlist, errlist_nvlist) errors.lzc_release_translate_errors(ret, errlist, holds) # If there is no error (no exception raised by _handle_err_list), but errlist # is not empty, then it contains missing snapshots and tags. assert all(errlist[x] == errno.ENOENT for x in errlist) return list(errlist.keys()) def lzc_get_holds(snapname): ''' Retrieve the list of *user holds* on the specified snapshot. :param bytes snapname: the name of the snapshot. :return: holds on the snapshot along with their creation times in seconds since the epoch :rtype: dict of bytes : int ''' holds = {} with nvlist_out(holds) as nvlist: ret = _lib.lzc_get_holds(snapname, nvlist) errors.lzc_get_holds_translate_error(ret, snapname) return holds def lzc_send(snapname, fromsnap, fd, flags=None): ''' Generate a zfs send stream for the specified snapshot and write it to the specified file descriptor. :param bytes snapname: the name of the snapshot to send. :param fromsnap: if not None, the name of the starting snapshot for the incremental stream. :type fromsnap: bytes or None :param int fd: the file descriptor to write the send stream to. :param flags: the flags that control what enhanced features can be used in the stream. :type flags: list of bytes :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist, or if the ending snapshot does not exist. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. :raises PoolsDiffer: if the snapshots belong to different pools. :raises IOError: if an input / output error occurs while writing to ``fd``. :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag name. If ``fromsnap`` is None, a full (non-incremental) stream will be sent. If ``fromsnap`` is not None, it must be the full name of a snapshot or bookmark to send an incremental from, e.g. :file:`{pool}/{fs}@{earlier_snap}` or :file:`{pool}/{fs}#{earlier_bmark}`. The specified snapshot or bookmark must represent an earlier point in the history of ``snapname``. It can be an earlier snapshot in the same filesystem or zvol as ``snapname``, or it can be the origin of ``snapname``'s filesystem, or an earlier snapshot in the origin, etc. ``fromsnap`` must be strictly an earlier snapshot; specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an error. If ``flags`` contains *"large_blocks"*, the stream is permitted to contain ``DRR_WRITE`` records with ``drr_length`` > 128K, and ``DRR_OBJECT`` records with ``drr_blksz`` > 128K. If ``flags`` contains *"embedded_data"*, the stream is permitted to contain ``DRR_WRITE_EMBEDDED`` records with ``drr_etype`` == ``BP_EMBEDDED_TYPE_DATA``, which the receiving system must support (as indicated by support for the *embedded_data* feature). If ``flags`` contains *"compress"*, the stream is generated by using compressed WRITE records for blocks which are compressed on disk and in memory. If the *lz4_compress* feature is active on the sending system, then the receiving system must have that feature enabled as well.
If ``flags`` contains *"raw"*, the stream is generated, for encrypted datasets, by sending data exactly as it exists on disk. This allows backups to be taken even if encryption keys are not currently loaded. .. note:: ``lzc_send`` can actually accept a filesystem name as the ``snapname``. In that case ``lzc_send`` acts as if a temporary snapshot was created after the start of the call and before the stream starts being produced. .. note:: ``lzc_send`` does not return until all of the stream is written to ``fd``. .. note:: ``lzc_send`` does *not* close ``fd`` upon returning. ''' if fromsnap is not None: c_fromsnap = fromsnap else: c_fromsnap = _ffi.NULL c_flags = 0 if flags is None: flags = [] for flag in flags: c_flag = { 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA, 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK, 'compress': _lib.LZC_SEND_FLAG_COMPRESS, 'raw': _lib.LZC_SEND_FLAG_RAW, }.get(flag) if c_flag is None: raise exceptions.UnknownStreamFeature(flag) c_flags |= c_flag ret = _lib.lzc_send(snapname, c_fromsnap, fd, c_flags) errors.lzc_send_translate_error(ret, snapname, fromsnap, fd, flags) def lzc_send_space(snapname, fromsnap=None, flags=None): ''' Estimate the size of a full or incremental backup stream given the optional starting snapshot and the ending snapshot. :param bytes snapname: the name of the snapshot for which the estimate should be done. :param fromsnap: the optional starting snapshot name. If not `None` then an incremental stream size is estimated, otherwise a full stream is estimated. :type fromsnap: `bytes` or `None` :param flags: the flags that control what enhanced features can be used in the stream. :type flags: list of bytes :return: the estimated stream size, in bytes. :rtype: `int` or `long` :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist, or if the ending snapshot does not exist. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. :raises PoolsDiffer: if the snapshots belong to different pools. ``fromsnap``, if not ``None``, must be strictly an earlier snapshot; specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an error. ''' if fromsnap is not None: c_fromsnap = fromsnap else: c_fromsnap = _ffi.NULL c_flags = 0 if flags is None: flags = [] for flag in flags: c_flag = { 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA, 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK, 'compress': _lib.LZC_SEND_FLAG_COMPRESS, 'raw': _lib.LZC_SEND_FLAG_RAW, }.get(flag) if c_flag is None: raise exceptions.UnknownStreamFeature(flag) c_flags |= c_flag valp = _ffi.new('uint64_t *') ret = _lib.lzc_send_space(snapname, c_fromsnap, c_flags, valp) errors.lzc_send_space_translate_error(ret, snapname, fromsnap) return int(valp[0]) def lzc_receive(snapname, fd, force=False, raw=False, origin=None, props=None): ''' Receive from the specified ``fd``, creating the specified snapshot. :param bytes snapname: the name of the snapshot to create. :param int fd: the file descriptor from which to read the stream. :param bool force: whether to roll back or destroy the target filesystem if that is required to receive the stream. :param bool raw: whether this is a "raw" stream. :param origin: the optional origin snapshot name if the stream is for a clone. :type origin: bytes or None :param props: the properties to set on the snapshot as *received* properties.
:type props: dict of bytes : Any :raises IOError: if an input / output error occurs while reading from the ``fd``. :raises DatasetExists: if the snapshot named ``snapname`` already exists. :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot exists and it is an origin of a cloned filesystem. :raises StreamMismatch: if an incremental stream is received and the latest snapshot of the destination filesystem does not match the source snapshot of the stream. :raises StreamMismatch: if a full stream is received and the destination filesystem already exists and it has at least one snapshot, and ``force`` is `False`. :raises StreamMismatch: if an incremental clone stream is received but the specified ``origin`` is not the actual received origin. :raises DestinationModified: if an incremental stream is received and the destination filesystem has been modified since the last snapshot and ``force`` is `False`. :raises DestinationModified: if a full stream is received and the destination filesystem already exists and it does not have any snapshots, and ``force`` is `False`. :raises DatasetNotFound: if the destination filesystem and its parent do not exist. :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot is held and could not be destroyed. :raises DatasetBusy: if another receive operation is being performed on the destination filesystem. :raises BadStream: if the stream is corrupt or it is not recognized or it is a compound stream or it is a clone stream, but ``origin`` is `None`. :raises BadStream: if a clone stream is received and the destination filesystem already exists. :raises StreamFeatureNotSupported: if the stream has a feature that is not supported on this side. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. + :raises WrongParent: if the parent dataset of the received destination is + not a filesystem (e.g. ZVOL) .. note:: The ``origin`` is ignored if the actual stream is an incremental stream that is not a clone stream and the destination filesystem exists. If the stream is a full stream and the destination filesystem does not exist then the ``origin`` is checked for existence: if it does not exist :exc:`.DatasetNotFound` is raised, otherwise :exc:`.StreamMismatch` is raised, because that snapshot can not have any relation to the stream. .. note:: If ``force`` is `True` and the stream is incremental then the destination filesystem is rolled back to a matching source snapshot if necessary. Intermediate snapshots are destroyed in that case. However, none of the existing snapshots may have the same name as ``snapname`` even if such a snapshot were to be destroyed. The existing ``snapname`` snapshot always causes :exc:`.SnapshotExists` to be raised. If ``force`` is `True` and the stream is a full stream then the destination filesystem is replaced with the received filesystem unless the former has any snapshots. This prevents the destination filesystem from being rolled back / replaced. .. note:: This interface does not work on dedup'd streams (those with ``DMU_BACKUP_FEATURE_DEDUP``). .. 
note:: ``lzc_receive`` does not return until all of the stream is read from ``fd`` and applied to the pool. .. note:: ``lzc_receive`` does *not* close ``fd`` upon returning. ''' if origin is not None: c_origin = origin else: c_origin = _ffi.NULL if props is None: props = {} nvlist = nvlist_in(props) ret = _lib.lzc_receive(snapname, nvlist, c_origin, force, raw, fd) errors.lzc_receive_translate_errors( ret, snapname, fd, force, raw, False, False, origin, None ) lzc_recv = lzc_receive def lzc_exists(name): ''' Check if a dataset (a filesystem, or a volume, or a snapshot) with the given name exists. :param bytes name: the dataset name to check. :return: `True` if the dataset exists, `False` otherwise. :rtype: bool .. note:: ``lzc_exists`` can not be used to check for existence of bookmarks. ''' ret = _lib.lzc_exists(name) return bool(ret) @_uncommitted() def lzc_change_key(fsname, crypt_cmd, props=None, key=None): ''' Change encryption key on the specified dataset. :param bytes fsname: the name of the dataset. :param str crypt_cmd: the encryption "command" to be executed, currently supported values are "new_key", "inherit", "force_new_key" and "force_inherit". :param props: a `dict` of encryption-related property name-value pairs; only "keyformat", "keylocation" and "pbkdf2iters" are supported (empty by default). :type props: dict of bytes:Any :param key: dataset encryption key data (empty by default). :type key: bytes :raises PropertyInvalid: if ``props`` contains invalid values. :raises FilesystemNotFound: if the dataset does not exist. :raises UnknownCryptCommand: if ``crypt_cmd`` is invalid. :raises EncryptionKeyNotLoaded: if the encryption key is not currently loaded and therefore cannot be changed. ''' if props is None: props = {} if key is None: key = b"" else: key = bytes(key) cmd = { 'new_key': _lib.DCP_CMD_NEW_KEY, 'inherit': _lib.DCP_CMD_INHERIT, 'force_new_key': _lib.DCP_CMD_FORCE_NEW_KEY, 'force_inherit': _lib.DCP_CMD_FORCE_INHERIT, }.get(crypt_cmd) if cmd is None: raise exceptions.UnknownCryptCommand(crypt_cmd) nvlist = nvlist_in(props) ret = _lib.lzc_change_key(fsname, cmd, nvlist, key, len(key)) errors.lzc_change_key_translate_error(ret, fsname) @_uncommitted() def lzc_load_key(fsname, noop, key): ''' Load or verify encryption key on the specified dataset. :param bytes fsname: the name of the dataset. :param bool noop: if `True` the encryption key will only be verified, not loaded. :param key: dataset encryption key data. :type key: bytes :raises FilesystemNotFound: if the dataset does not exist. :raises EncryptionKeyAlreadyLoaded: if the encryption key is already loaded. :raises EncryptionKeyInvalid: if the encryption key provided is incorrect. ''' ret = _lib.lzc_load_key(fsname, noop, key, len(key)) errors.lzc_load_key_translate_error(ret, fsname, noop) @_uncommitted() def lzc_unload_key(fsname): ''' Unload encryption key from the specified dataset. :param bytes fsname: the name of the dataset. :raises FilesystemNotFound: if the dataset does not exist. :raises DatasetBusy: if the encryption key is still being used. This usually occurs when the dataset is mounted. :raises EncryptionKeyNotLoaded: if the encryption key is not currently loaded. ''' ret = _lib.lzc_unload_key(fsname) errors.lzc_unload_key_translate_error(ret, fsname) def lzc_channel_program( poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT, memlimit=ZCP_DEFAULT_MEMLIMIT, params=None ): ''' Executes a script as a ZFS channel program on pool ``poolname``. :param bytes poolname: the name of the pool. 
:param bytes program: channel program text. :param int instrlimit: limit on the number of Lua instructions the channel program may execute. :param int memlimit: execution memory limit, in bytes. :param params: a `list` of parameters passed to the channel program (empty by default). :type params: list of Any :return: a dictionary of result values produced by the channel program, if any. :rtype: dict :raises PoolNotFound: if the pool does not exist. :raises ZCPLimitInvalid: if either the instruction or the memory limit is invalid. :raises ZCPSyntaxError: if the channel program contains syntax errors. :raises ZCPTimeout: if the channel program took too long to execute. :raises ZCPSpaceError: if the channel program exhausted the memory limit. :raises ZCPMemoryError: if the channel program return value was too large. :raises ZCPPermissionError: if the user lacks the permission to run the channel program. Channel programs must be run as root. :raises ZCPRuntimeError: if the channel program encountered a runtime error. ''' output = {} params_nv = nvlist_in({b"argv": params}) with nvlist_out(output) as outnvl: ret = _lib.lzc_channel_program( poolname, program, instrlimit, memlimit, params_nv, outnvl) errors.lzc_channel_program_translate_error( ret, poolname, output.get(b"error")) return output.get(b"return") def lzc_channel_program_nosync( poolname, program, instrlimit=ZCP_DEFAULT_INSTRLIMIT, memlimit=ZCP_DEFAULT_MEMLIMIT, params=None ): ''' Executes a script as a read-only ZFS channel program on pool ``poolname``. A read-only channel program works programmatically the same way as a normal channel program executed with :func:`lzc_channel_program`. The only difference is that it runs exclusively in open-context and therefore can return faster. The downside is that the program cannot change on-disk state by calling functions from the zfs.sync submodule. :param bytes poolname: the name of the pool. :param bytes program: channel program text. :param int instrlimit: limit on the number of Lua instructions the channel program may execute. :param int memlimit: execution memory limit, in bytes. :param params: a `list` of parameters passed to the channel program (empty by default). :type params: list of Any :return: a dictionary of result values produced by the channel program, if any. :rtype: dict :raises PoolNotFound: if the pool does not exist. :raises ZCPLimitInvalid: if either the instruction or the memory limit is invalid. :raises ZCPSyntaxError: if the channel program contains syntax errors. :raises ZCPTimeout: if the channel program took too long to execute. :raises ZCPSpaceError: if the channel program exhausted the memory limit. :raises ZCPMemoryError: if the channel program return value was too large. :raises ZCPPermissionError: if the user lacks the permission to run the channel program. Channel programs must be run as root. :raises ZCPRuntimeError: if the channel program encountered a runtime error. ''' output = {} params_nv = nvlist_in({b"argv": params}) with nvlist_out(output) as outnvl: ret = _lib.lzc_channel_program_nosync( poolname, program, instrlimit, memlimit, params_nv, outnvl) errors.lzc_channel_program_translate_error( ret, poolname, output.get(b"error")) return output.get(b"return") def lzc_receive_resumable( snapname, fd, force=False, raw=False, origin=None, props=None ): ''' Like :func:`lzc_receive`, but if the receive fails due to premature stream termination, the intermediate state will be preserved on disk. In this case, ECKSUM will be returned.
The receive may subsequently be resumed with a resuming send stream generated by lzc_send_resume(). :param bytes snapname: the name of the snapshot to create. :param int fd: the file descriptor from which to read the stream. :param bool force: whether to roll back or destroy the target filesystem if that is required to receive the stream. :param bool raw: whether this is a "raw" stream. :param origin: the optional origin snapshot name if the stream is for a clone. :type origin: bytes or None :param props: the properties to set on the snapshot as *received* properties. :type props: dict of bytes : Any :raises IOError: if an input / output error occurs while reading from the ``fd``. :raises DatasetExists: if the snapshot named ``snapname`` already exists. :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot exists and it is an origin of a cloned filesystem. :raises StreamMismatch: if an incremental stream is received and the latest snapshot of the destination filesystem does not match the source snapshot of the stream. :raises StreamMismatch: if a full stream is received and the destination filesystem already exists and it has at least one snapshot, and ``force`` is `False`. :raises StreamMismatch: if an incremental clone stream is received but the specified ``origin`` is not the actual received origin. :raises DestinationModified: if an incremental stream is received and the destination filesystem has been modified since the last snapshot and ``force`` is `False`. :raises DestinationModified: if a full stream is received and the destination filesystem already exists and it does not have any snapshots, and ``force`` is `False`. :raises DatasetNotFound: if the destination filesystem and its parent do not exist. :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot is held and could not be destroyed. :raises DatasetBusy: if another receive operation is being performed on the destination filesystem. :raises BadStream: if the stream is corrupt or it is not recognized or it is a compound stream or it is a clone stream, but ``origin`` is `None`. :raises BadStream: if a clone stream is received and the destination filesystem already exists. :raises StreamFeatureNotSupported: if the stream has a feature that is not supported on this side. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. ''' if origin is not None: c_origin = origin else: c_origin = _ffi.NULL if props is None: props = {} nvlist = nvlist_in(props) ret = _lib.lzc_receive_resumable( snapname, nvlist, c_origin, force, raw, fd) errors.lzc_receive_translate_errors( ret, snapname, fd, force, raw, False, False, origin, None) def lzc_receive_with_header( snapname, fd, begin_record, force=False, resumable=False, raw=False, origin=None, props=None ): ''' Like :func:`lzc_receive`, but allows the caller to read the begin record and then to pass it in. That could be useful if the caller wants to derive, for example, the snapname or the origin parameters based on the information contained in the begin record. :func:`receive_header` can be used to receive the begin record from the file descriptor. 
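A rough sketch of the combined flow (the stream descriptor ``stream_fd`` and the destination filesystem name are hypothetical)::

    (header, c_header) = receive_header(stream_fd)
    # Receive into a different filesystem while keeping the snapshot
    # name component taken from the stream's begin record.
    snapname = b'pool/backup@' + header['drr_toname'].split(b'@', 1)[1]
    lzc_receive_with_header(snapname, stream_fd, c_header)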
:param bytes snapname: the name of the snapshot to create. :param int fd: the file descriptor from which to read the stream. :param begin_record: the stream's begin record. :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t structure. :param bool force: whether to roll back or destroy the target filesystem if that is required to receive the stream. :param bool resumable: whether this stream should be treated as resumable. If the receive fails due to premature stream termination, the intermediate state will be preserved on disk and may subsequently be resumed with :func:`lzc_send_resume`. :param bool raw: whether this is a "raw" stream. :param origin: the optional origin snapshot name if the stream is for a clone. :type origin: bytes or None :param props: the properties to set on the snapshot as *received* properties. :type props: dict of bytes : Any :raises IOError: if an input / output error occurs while reading from the ``fd``. :raises DatasetExists: if the snapshot named ``snapname`` already exists. :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot exists and it is an origin of a cloned filesystem. :raises StreamMismatch: if an incremental stream is received and the latest snapshot of the destination filesystem does not match the source snapshot of the stream. :raises StreamMismatch: if a full stream is received and the destination filesystem already exists and it has at least one snapshot, and ``force`` is `False`. :raises StreamMismatch: if an incremental clone stream is received but the specified ``origin`` is not the actual received origin. :raises DestinationModified: if an incremental stream is received and the destination filesystem has been modified since the last snapshot and ``force`` is `False`. :raises DestinationModified: if a full stream is received and the destination filesystem already exists and it does not have any snapshots, and ``force`` is `False`. :raises DatasetNotFound: if the destination filesystem and its parent do not exist. :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot is held and could not be destroyed. :raises DatasetBusy: if another receive operation is being performed on the destination filesystem. :raises BadStream: if the stream is corrupt or it is not recognized or it is a compound stream or it is a clone stream, but ``origin`` is `None`. :raises BadStream: if a clone stream is received and the destination filesystem already exists. :raises StreamFeatureNotSupported: if the stream has a feature that is not supported on this side. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. ''' if origin is not None: c_origin = origin else: c_origin = _ffi.NULL if props is None: props = {} nvlist = nvlist_in(props) ret = _lib.lzc_receive_with_header( snapname, nvlist, c_origin, force, resumable, raw, fd, begin_record) errors.lzc_receive_translate_errors( ret, snapname, fd, force, raw, False, False, origin, None) def receive_header(fd): ''' Read the begin record of the ZFS backup stream from the given file descriptor. This is a helper function for :func:`lzc_receive_with_header`. 
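For instance, a caller could peek at the stream type before deciding how to receive it (``stream_fd`` is a hypothetical descriptor; the header fields used here are described below)::

    (header, c_header) = receive_header(stream_fd)
    if header['drr_fromguid'] == 0:
        print('full stream of', header['drr_toname'])
    else:
        print('incremental stream of', header['drr_toname'])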
:param int fd: the file descriptor from which to read the stream. :return: a tuple with two elements where the first one is a Python `dict` representing the fields of the begin record and the second one is an opaque object suitable for passing to :func:`lzc_receive_with_header`. :raises IOError: if an input / output error occurs while reading from the ``fd``. At present the following fields can be of interest in the header: drr_toname : bytes the name of the snapshot for which the stream has been created drr_toguid : integer the GUID of the snapshot for which the stream has been created drr_fromguid : integer the GUID of the starting snapshot if the stream is incremental, zero otherwise drr_flags : integer the flags describing the stream's properties drr_type : integer the type of the dataset for which the stream has been created (volume, filesystem) ''' # read sizeof(dmu_replay_record_t) bytes directly into the memory backing # 'record' record = _ffi.new("dmu_replay_record_t *") _ffi.buffer(record)[:] = os.read(fd, _ffi.sizeof(record[0])) # get drr_begin member and its representation as a Python dict drr_begin = record.drr_u.drr_begin header = {} for field, descr in _ffi.typeof(drr_begin).fields: if descr.type.kind == 'primitive': header[field] = getattr(drr_begin, field) elif descr.type.kind == 'enum': header[field] = getattr(drr_begin, field) elif descr.type.kind == 'array' and descr.type.item.cname == 'char': header[field] = _ffi.string(getattr(drr_begin, field)) else: raise TypeError( 'Unexpected field type in drr_begin: ' + str(descr.type)) return (header, record) @_uncommitted() def lzc_receive_one( snapname, fd, begin_record, force=False, resumable=False, raw=False, origin=None, props=None, cleanup_fd=-1, action_handle=0 ): ''' Like :func:`lzc_receive`, but allows the caller to pass all supported arguments and retrieve all values returned. The only additional input parameter is 'cleanup_fd' which is used to set a cleanup-on-exit file descriptor. :param bytes snapname: the name of the snapshot to create. :param int fd: the file descriptor from which to read the stream. :param begin_record: the stream's begin record. :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t structure. :param bool force: whether to roll back or destroy the target filesystem if that is required to receive the stream. :param bool resumable: whether this stream should be treated as resumable. If the receive fails due to premature stream termination, the intermediate state will be preserved on disk and may subsequently be resumed with :func:`lzc_send_resume`. :param bool raw: whether this is a "raw" stream. :param origin: the optional origin snapshot name if the stream is for a clone. :type origin: bytes or None :param props: the properties to set on the snapshot as *received* properties. :type props: dict of bytes : Any :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file descriptor. :param int action_handle: variable used to pass the handle for guid/ds mapping: this should be set to zero on first call and will contain an updated handle on success, which should be passed in subsequent calls. :return: a tuple with two elements where the first one is the number of bytes read from the file descriptor and the second one is the action_handle return value. :raises IOError: if an input / output error occurs while reading from the ``fd``. :raises DatasetExists: if the snapshot named ``snapname`` already exists.
:raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot exists and it is an origin of a cloned filesystem. :raises StreamMismatch: if an incremental stream is received and the latest snapshot of the destination filesystem does not match the source snapshot of the stream. :raises StreamMismatch: if a full stream is received and the destination filesystem already exists and it has at least one snapshot, and ``force`` is `False`. :raises StreamMismatch: if an incremental clone stream is received but the specified ``origin`` is not the actual received origin. :raises DestinationModified: if an incremental stream is received and the destination filesystem has been modified since the last snapshot and ``force`` is `False`. :raises DestinationModified: if a full stream is received and the destination filesystem already exists and it does not have any snapshots, and ``force`` is `False`. :raises DatasetNotFound: if the destination filesystem and its parent do not exist. :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot is held and could not be destroyed. :raises DatasetBusy: if another receive operation is being performed on the destination filesystem. :raises BadStream: if the stream is corrupt or it is not recognized or it is a compound stream or it is a clone stream, but ``origin`` is `None`. :raises BadStream: if a clone stream is received and the destination filesystem already exists. :raises StreamFeatureNotSupported: if the stream has a feature that is not supported on this side. :raises ReceivePropertyFailure: if one or more of the specified properties is invalid or has an invalid type or value. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. ''' if origin is not None: c_origin = origin else: c_origin = _ffi.NULL if action_handle is not None: c_action_handle = _ffi.new("uint64_t *") else: c_action_handle = _ffi.NULL c_read_bytes = _ffi.new("uint64_t *") c_errflags = _ffi.new("uint64_t *") if props is None: props = {} nvlist = nvlist_in(props) properrs = {} with nvlist_out(properrs) as c_errors: ret = _lib.lzc_receive_one( snapname, nvlist, c_origin, force, resumable, raw, fd, begin_record, cleanup_fd, c_read_bytes, c_errflags, c_action_handle, c_errors) errors.lzc_receive_translate_errors( ret, snapname, fd, force, raw, False, False, origin, properrs) return (int(c_read_bytes[0]), action_handle) @_uncommitted() def lzc_receive_with_cmdprops( snapname, fd, begin_record, force=False, resumable=False, raw=False, origin=None, props=None, cmdprops=None, key=None, cleanup_fd=-1, action_handle=0 ): ''' Like :func:`lzc_receive_one`, but allows the caller to pass an additional 'cmdprops' argument. The 'cmdprops' nvlist contains both override ('zfs receive -o') and exclude ('zfs receive -x') properties. :param bytes snapname: the name of the snapshot to create. :param int fd: the file descriptor from which to read the stream. :param begin_record: the stream's begin record. :type begin_record: ``cffi`` `CData` representing the dmu_replay_record_t structure. 
:param bool force: whether to roll back or destroy the target filesystem if that is required to receive the stream. :param bool resumable: whether this stream should be treated as resumable. If the receive fails due to premature stream termination, the intermediate state will be preserved on disk and may subsequently be resumed with :func:`lzc_send_resume`. :param bool raw: whether this is a "raw" stream. :param origin: the optional origin snapshot name if the stream is for a clone. :type origin: bytes or None :param props: the properties to set on the snapshot as *received* properties. :type props: dict of bytes : Any :param cmdprops: the properties to set on the snapshot as local overrides to *received* properties. `bool` values are forcefully inherited while every other value is set locally as if the command "zfs set" was invoked immediately before the receive. :type cmdprops: dict of bytes : Any :param key: raw bytes representing user's wrapping key :type key: bytes :param int cleanup_fd: file descriptor used to set a cleanup-on-exit file descriptor. :param int action_handle: variable used to pass the handle for guid/ds mapping: this should be set to zero on first call and will contain an updated handle on success, it should be passed in subsequent calls. :return: a tuple with two elements where the first one is the number of bytes read from the file descriptor and the second one is the action_handle return value. :raises IOError: if an input / output error occurs while reading from the ``fd``. :raises DatasetExists: if the snapshot named ``snapname`` already exists. :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot exists and it is an origin of a cloned filesystem. :raises StreamMismatch: if an incremental stream is received and the latest snapshot of the destination filesystem does not match the source snapshot of the stream. :raises StreamMismatch: if a full stream is received and the destination filesystem already exists and it has at least one snapshot, and ``force`` is `False`. :raises StreamMismatch: if an incremental clone stream is received but the specified ``origin`` is not the actual received origin. :raises DestinationModified: if an incremental stream is received and the destination filesystem has been modified since the last snapshot and ``force`` is `False`. :raises DestinationModified: if a full stream is received and the destination filesystem already exists and it does not have any snapshots, and ``force`` is `False`. :raises DatasetNotFound: if the destination filesystem and its parent do not exist. :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not be rolled back to a matching snapshot because a newer snapshot is held and could not be destroyed. :raises DatasetBusy: if another receive operation is being performed on the destination filesystem. :raises BadStream: if the stream is corrupt or it is not recognized or it is a compound stream or it is a clone stream, but ``origin`` is `None`. :raises BadStream: if a clone stream is received and the destination filesystem already exists. :raises StreamFeatureNotSupported: if the stream has a feature that is not supported on this side. 
:raises ReceivePropertyFailure: if one or more of the specified properties is invalid or has an invalid type or value. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. ''' if origin is not None: c_origin = origin else: c_origin = _ffi.NULL if action_handle is not None: c_action_handle = _ffi.new("uint64_t *") else: c_action_handle = _ffi.NULL c_read_bytes = _ffi.new("uint64_t *") c_errflags = _ffi.new("uint64_t *") if props is None: props = {} if cmdprops is None: cmdprops = {} if key is None: key = b"" else: key = bytes(key) nvlist = nvlist_in(props) cmdnvlist = nvlist_in(cmdprops) properrs = {} with nvlist_out(properrs) as c_errors: ret = _lib.lzc_receive_with_cmdprops( snapname, nvlist, cmdnvlist, key, len(key), c_origin, force, resumable, raw, fd, begin_record, cleanup_fd, c_read_bytes, c_errflags, c_action_handle, c_errors) errors.lzc_receive_translate_errors( ret, snapname, fd, force, raw, False, False, origin, properrs) return (int(c_read_bytes[0]), action_handle) @_uncommitted() def lzc_reopen(poolname, restart=True): ''' Reopen a pool. :param bytes poolname: the name of the pool. :param bool restart: whether to restart an in-progress scrub operation. :raises PoolNotFound: if the pool does not exist. ''' ret = _lib.lzc_reopen(poolname, restart) errors.lzc_reopen_translate_error(ret, poolname) def lzc_send_resume( snapname, fromsnap, fd, flags=None, resumeobj=0, resumeoff=0 ): ''' Resume a previously interrupted send operation, generating a zfs send stream for the specified snapshot and writing it to the specified file descriptor. :param bytes snapname: the name of the snapshot to send. :param fromsnap: if not `None`, the name of the starting snapshot for the incremental stream. :type fromsnap: bytes or None :param int fd: the file descriptor to write the send stream to. :param flags: the flags that control what enhanced features can be used in the stream. :type flags: list of bytes :param int resumeobj: the object number where this send stream should resume from. :param int resumeoff: the offset where this send stream should resume from. :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist, or if the ending snapshot does not exist. :raises NameInvalid: if the name of either snapshot is invalid. :raises NameTooLong: if the name of either snapshot is too long. :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. :raises PoolsDiffer: if the snapshots belong to different pools. :raises IOError: if an input / output error occurs while writing to ``fd``. :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag name. .. note:: See :func:`lzc_send` for more information. ''' if fromsnap is not None: c_fromsnap = fromsnap else: c_fromsnap = _ffi.NULL c_flags = 0 if flags is None: flags = [] for flag in flags: c_flag = { 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA, 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK, 'compress': _lib.LZC_SEND_FLAG_COMPRESS, 'raw': _lib.LZC_SEND_FLAG_RAW, }.get(flag) if c_flag is None: raise exceptions.UnknownStreamFeature(flag) c_flags |= c_flag ret = _lib.lzc_send_resume( snapname, c_fromsnap, fd, c_flags, uint64_t(resumeobj), uint64_t(resumeoff)) errors.lzc_send_translate_error(ret, snapname, fromsnap, fd, flags) @_uncommitted() def lzc_sync(poolname, force=False): ''' Forces all in-core dirty data to be written to the primary pool storage and not the ZIL. :param bytes poolname: the name of the pool.
:param bool force: whether to force uberblock update even if there is no dirty data. :raises PoolNotFound: if the pool does not exist. .. note:: This method signature is different from its C libzfs_core counterpart: `innvl` has been replaced by the `force` boolean and `outnvl` has been conveniently removed since it's not used. ''' innvl = nvlist_in({b"force": force}) with nvlist_out({}) as outnvl: ret = _lib.lzc_sync(poolname, innvl, outnvl) errors.lzc_sync_translate_error(ret, poolname) def is_supported(func): ''' Check whether C *libzfs_core* provides the implementation required for the given Python wrapper. If `is_supported` returns ``False`` for the function, then calling the function would result in :exc:`NotImplementedError`. :param function func: the function to check. :return bool: whether the function can be used. ''' fname = func.__name__ if fname not in globals(): raise ValueError(fname + ' is not from libzfs_core') if not callable(func): raise ValueError(fname + ' is not a function') if not fname.startswith("lzc_"): raise ValueError(fname + ' is not a libzfs_core API function') check_func = getattr(func, "_check_func", None) if check_func is not None: return is_supported(check_func) return getattr(_lib, fname, None) is not None @_uncommitted() def lzc_promote(name): ''' Promotes the ZFS dataset. :param bytes name: the name of the dataset to promote. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises NameTooLong: if the dataset's origin has a snapshot that, if transferred to the dataset, would get a name that is too long. :raises NotClone: if the dataset is not a clone. :raises FilesystemNotFound: if the dataset does not exist. :raises SnapshotExists: if the dataset already has a snapshot with the same name as one of the origin's snapshots. ''' ret = _lib.lzc_promote(name, _ffi.NULL, _ffi.NULL) errors.lzc_promote_translate_error(ret, name) @_uncommitted() def lzc_remap(name): ''' Remaps the ZFS dataset. :param bytes name: the name of the dataset to remap. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises DatasetNotFound: if the dataset does not exist. :raises FeatureNotSupported: if the pool containing the dataset does not have the *obsolete_counts* feature enabled. ''' ret = _lib.lzc_remap(name) errors.lzc_remap_translate_error(ret, name) @_uncommitted() def lzc_pool_checkpoint(name): ''' Creates a checkpoint for the specified pool. :param bytes name: the name of the pool to create a checkpoint for. :raises CheckpointExists: if the pool already has a checkpoint. :raises CheckpointDiscarding: if ZFS is in the middle of discarding a checkpoint for this pool. :raises DeviceRemovalRunning: if a vdev is currently being removed. :raises DeviceTooBig: if one or more top-level vdevs exceed the maximum vdev size. ''' ret = _lib.lzc_pool_checkpoint(name) errors.lzc_pool_checkpoint_translate_error(ret, name) @_uncommitted() def lzc_pool_checkpoint_discard(name): ''' Discard the checkpoint from the specified pool. :param bytes name: the name of the pool to discard the checkpoint from. :raises CheckpointNotFound: if the pool does not have a checkpoint. :raises CheckpointDiscarding: if ZFS is in the middle of discarding a checkpoint for this pool. ''' ret = _lib.lzc_pool_checkpoint_discard(name) errors.lzc_pool_checkpoint_discard_translate_error(ret, name) def lzc_rename(source, target): ''' Rename the ZFS dataset. :param bytes source: the current name of the dataset to rename.
:param bytes target: the new name of the dataset. :raises NameInvalid: if either the source or target name is invalid. :raises NameTooLong: if either the source or target name is too long. :raises NameTooLong: if a snapshot of the source would get a name that is too long after renaming. :raises FilesystemNotFound: if the source does not exist. :raises FilesystemNotFound: if the target's parent does not exist. :raises FilesystemExists: if the target already exists. :raises PoolsDiffer: if the source and target belong to different pools. + :raises WrongParent: if the "new" parent dataset is not a filesystem + (e.g. ZVOL). ''' ret = _lib.lzc_rename(source, target) errors.lzc_rename_translate_error(ret, source, target) def lzc_destroy(name): ''' Destroy the ZFS dataset. :param bytes name: the name of the dataset to destroy. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises FilesystemNotFound: if the dataset does not exist. ''' ret = _lib.lzc_destroy(name) errors.lzc_destroy_translate_error(ret, name) @_uncommitted() def lzc_inherit(name, prop): ''' Inherit properties from a parent dataset of the given ZFS dataset. :param bytes name: the name of the dataset. :param bytes prop: the name of the property to inherit. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises DatasetNotFound: if the dataset does not exist. :raises PropertyInvalid: if one or more of the specified properties is invalid or has an invalid type or value. Inheriting a property actually resets it to its default value or removes it if it's a user property, so that the property could be inherited if it's inheritable. If the property is not inheritable then it would just have its default value. This function can be used on snapshots to inherit user defined properties. ''' ret = _lib.lzc_inherit(name, prop, _ffi.NULL) errors.lzc_inherit_prop_translate_error(ret, name, prop) # As the extended API is not committed yet, the names of the new interfaces # are not settled down yet. # lzc_inherit_prop makes it clearer what is to be inherited. lzc_inherit_prop = lzc_inherit @_uncommitted() def lzc_set_props(name, prop, val): ''' Set properties of the ZFS dataset. :param bytes name: the name of the dataset. :param bytes prop: the name of the property. :param Any val: the value of the property. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises DatasetNotFound: if the dataset does not exist. :raises NoSpace: if the property controls a quota and the value is too small for that quota. :raises PropertyInvalid: if one or more of the specified properties is invalid or has an invalid type or value. This function can be used on snapshots to set user defined properties. .. note:: An attempt to set a readonly / statistic property is ignored without reporting any error. ''' props = {prop: val} props_nv = nvlist_in(props) ret = _lib.lzc_set_props(name, props_nv, _ffi.NULL, _ffi.NULL) errors.lzc_set_prop_translate_error(ret, name, prop, val) # As the extended API is not committed yet, the names of the new interfaces # are not settled down yet. # It's not clear if atomically setting multiple properties is an achievable # goal and an interface acting on multiple entities must do so atomically # by convention. # Being able to set a single property at a time is sufficient for ClusterHQ.
lzc_set_prop = lzc_set_props @_uncommitted() def lzc_list(name, options): ''' List subordinate elements of the given dataset. This function can be used to list child datasets and snapshots of the given dataset. The listed elements can be filtered by their type and by their depth relative to the starting dataset. :param bytes name: the name of the dataset to be listed, could be a snapshot or a dataset. :param options: a `dict` of the options that control the listing behavior. :type options: dict of bytes:Any :return: a pair of file descriptors, the first of which can be used to read the listing. :rtype: tuple of (int, int) :raises DatasetNotFound: if the dataset does not exist. Two options are currently available: recurse : integer or None specifies depth of the recursive listing. If ``None`` the depth is not limited. Absence of this option means that only the given dataset is listed. type : dict of bytes:None specifies dataset types to include into the listing. Currently allowed keys are "filesystem", "volume", "snapshot". Absence of this option implies all types. The first of the returned file descriptors can be used to read the listing in a binary encoded format. The data is a series of variable sized records, each starting with a fixed size header that is followed by a serialized ``nvlist``. Each record describes a single element and contains the element's name as well as its properties. The file descriptor must be closed after reading from it. The second file descriptor represents a pipe end to which the kernel driver is writing information. It should not be closed until all interesting information has been read and it must be explicitly closed afterwards. ''' (rfd, wfd) = os.pipe() fcntl.fcntl(rfd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) fcntl.fcntl(wfd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) options = options.copy() options['fd'] = int32_t(wfd) opts_nv = nvlist_in(options) ret = _lib.lzc_list(name, opts_nv) if ret == errno.ESRCH: return (None, None) errors.lzc_list_translate_error(ret, name, options) return (rfd, wfd) # Description of the binary format used to pass data from the kernel. _PIPE_RECORD_FORMAT = 'IBBBB' _PIPE_RECORD_SIZE = struct.calcsize(_PIPE_RECORD_FORMAT) def _list(name, recurse=None, types=None): ''' A wrapper for :func:`lzc_list` that hides details of working with the file descriptors and provides data in an easy-to-consume format. :param bytes name: the name of the dataset to be listed, could be a snapshot, a volume or a filesystem. :param recurse: specifies depth of the recursive listing. If ``None`` the depth is not limited. :param types: specifies dataset types to include into the listing. Currently allowed keys are "filesystem", "volume", "snapshot". ``None`` is equivalent to specifying the type of the dataset named by `name`. :type types: list of bytes or None :type recurse: integer or None :return: a list of dictionaries each describing a single listed element. :rtype: list of dict ''' options = {} # Convert types to a dict suitable for mapping to an nvlist. if types is not None: types = {x: None for x in types} options['type'] = types if recurse is None or recurse > 0: options['recurse'] = recurse # Note that other_fd is used by the kernel side to write # the data, so we have to keep that descriptor open until # we are done. # Also, we have to explicitly close the descriptor as the # kernel doesn't do that.
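# Each record in the pipe is a fixed size header (unpacked below with
# _PIPE_RECORD_FORMAT) immediately followed by a packed nvlist whose
# length is given by the first header field.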
(fd, other_fd) = lzc_list(name, options) if fd is None: return try: while True: record_bytes = os.read(fd, _PIPE_RECORD_SIZE) if not record_bytes: break (size, _, err, _, _) = struct.unpack( _PIPE_RECORD_FORMAT, record_bytes) if err == errno.ESRCH: break errors.lzc_list_translate_error(err, name, options) if size == 0: break data_bytes = os.read(fd, size) result = {} with nvlist_out(result) as nvp: ret = _lib.nvlist_unpack(data_bytes, size, nvp, 0) if ret != 0: raise exceptions.ZFSGenericError( ret, None, "Failed to unpack list data") yield result finally: os.close(other_fd) os.close(fd) @_uncommitted(lzc_list) def lzc_get_props(name): ''' Get properties of the ZFS dataset. :param bytes name: the name of the dataset. :raises DatasetNotFound: if the dataset does not exist. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :return: a dictionary mapping the property names to their values. :rtype: dict of bytes:Any .. note:: The value of ``clones`` property is a `list` of clone names as byte strings. .. warning:: The returned dictionary does not contain entries for properties with default values. One exception is the ``mountpoint`` property for which the default value is derived from the dataset name. ''' result = next(_list(name, recurse=0)) is_snapshot = result['dmu_objset_stats']['dds_is_snapshot'] result = result['properties'] # In most cases the source of the property is uninteresting and the # value alone is sufficient. One exception is the 'mountpoint' # property the final value of which is not the same as the inherited # value. mountpoint = result.get('mountpoint') if mountpoint is not None: mountpoint_src = mountpoint['source'] mountpoint_val = mountpoint['value'] # 'source' is the name of the dataset that has 'mountpoint' set # to a non-default value and from which the current dataset inherits # the property. 'source' can be the current dataset if its # 'mountpoint' is explicitly set. # 'source' can also be a special value like '$recvd', that case # is equivalent to the property being set on the current dataset. # Note that a normal mountpoint value should start with '/' # unlike the special values "none" and "legacy". if (mountpoint_val.startswith('/') and not mountpoint_src.startswith('$')): mountpoint_val = mountpoint_val + name[len(mountpoint_src):] elif not is_snapshot: mountpoint_val = '/' + name else: mountpoint_val = None result = {k: result[k]['value'] for k in result} if 'clones' in result: result['clones'] = list(result['clones'].keys()) if mountpoint_val is not None: result['mountpoint'] = mountpoint_val return result @_uncommitted(lzc_list) def lzc_list_children(name): ''' List the children of the ZFS dataset. :param bytes name: the name of the dataset. :return: an iterator that produces the names of the children. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises DatasetNotFound: if the dataset does not exist. .. warning:: If the dataset does not exist, then the returned iterator would produce no results and no error is reported. That case is indistinguishable from the dataset having no children. An attempt to list children of a snapshot is silently ignored as well. ''' children = [] for entry in _list(name, recurse=1, types=['filesystem', 'volume']): child = entry['name'] if child != name: children.append(child) return iter(children) @_uncommitted(lzc_list) def lzc_list_snaps(name): ''' List the snapshots of the ZFS dataset. 
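A minimal usage sketch, assuming the operation is supported and a hypothetical dataset ``pool/fs``::

    for snap in lzc_list_snaps(b'pool/fs'):
        print(snap)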
:param bytes name: the name of the dataset. :return: an iterator that produces the names of the snapshots. :raises NameInvalid: if the dataset name is invalid. :raises NameTooLong: if the dataset name is too long. :raises DatasetNotFound: if the dataset does not exist. .. warning:: If the dataset does not exist, then the returned iterator would produce no results and no error is reported. That case is indistinguishable from the dataset having no snapshots. An attempt to list snapshots of a snapshot is silently ignored as well. ''' snaps = [] for entry in _list(name, recurse=1, types=['snapshot']): snap = entry['name'] if snap != name: snaps.append(snap) return iter(snaps) # TODO: a better way to init and uninit the library def _initialize(): class LazyInit(object): def __init__(self, lib): self._lib = lib self._inited = False self._lock = threading.Lock() def __getattr__(self, name): if not self._inited: with self._lock: if not self._inited: ret = self._lib.libzfs_core_init() if ret != 0: raise exceptions.ZFSInitializationFailed(ret) self._inited = True return getattr(self._lib, name) return LazyInit(libzfs_core.lib) _ffi = libzfs_core.ffi _lib = _initialize() # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/contrib/pyzfs/libzfs_core/exceptions.py b/contrib/pyzfs/libzfs_core/exceptions.py index c54459ec8b47..f465cd3d9309 100644 --- a/contrib/pyzfs/libzfs_core/exceptions.py +++ b/contrib/pyzfs/libzfs_core/exceptions.py @@ -1,583 +1,584 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Exceptions that can be raised by libzfs_core operations. """ from __future__ import absolute_import, division, print_function import errno from ._constants import ( ZFS_ERR_CHECKPOINT_EXISTS, ZFS_ERR_DISCARDING_CHECKPOINT, ZFS_ERR_NO_CHECKPOINT, ZFS_ERR_DEVRM_IN_PROGRESS, - ZFS_ERR_VDEV_TOO_BIG + ZFS_ERR_VDEV_TOO_BIG, + ZFS_ERR_WRONG_PARENT ) class ZFSError(Exception): errno = None message = None name = None def __str__(self): if self.name is not None: return "[Errno %d] %s: '%s'" % ( self.errno, self.message, self.name) else: return "[Errno %d] %s" % (self.errno, self.message) def __repr__(self): return "%s(%r, %r)" % ( self.__class__.__name__, self.errno, self.message) class ZFSGenericError(ZFSError): def __init__(self, errno, name, message): self.errno = errno self.message = message self.name = name class ZFSInitializationFailed(ZFSError): message = "Failed to initialize libzfs_core" def __init__(self, errno): self.errno = errno class MultipleOperationsFailure(ZFSError): def __init__(self, errors, suppressed_count): # Use first of the individual error codes # as an overall error code. This is more consistent. 
self.errno = errors[0].errno self.errors = errors # this many errors were encountered but not placed on the `errors` list self.suppressed_count = suppressed_count def __str__(self): return "%s, %d errors included, %d suppressed" % ( ZFSError.__str__(self), len(self.errors), self.suppressed_count) def __repr__(self): return "%s(%r, %r, errors=%r, suppressed=%r)" % ( self.__class__.__name__, self.errno, self.message, self.errors, self.suppressed_count) class DatasetNotFound(ZFSError): """ This exception is raised when an operation failure can be caused by a missing snapshot or a missing filesystem and it is impossible to distinguish between the causes. """ errno = errno.ENOENT message = "Dataset not found" def __init__(self, name): self.name = name class DatasetExists(ZFSError): """ This exception is raised when an operation failure can be caused by an existing snapshot or filesystem and it is impossible to distinguish between the causes. """ errno = errno.EEXIST message = "Dataset already exists" def __init__(self, name): self.name = name class NotClone(ZFSError): errno = errno.EINVAL message = "Filesystem is not a clone, can not promote" def __init__(self, name): self.name = name class FilesystemExists(DatasetExists): message = "Filesystem already exists" def __init__(self, name): self.name = name class FilesystemNotFound(DatasetNotFound): message = "Filesystem not found" def __init__(self, name): self.name = name class ParentNotFound(ZFSError): errno = errno.ENOENT message = "Parent not found" def __init__(self, name): self.name = name class WrongParent(ZFSError): - errno = errno.EINVAL + errno = ZFS_ERR_WRONG_PARENT message = "Parent dataset is not a filesystem" def __init__(self, name): self.name = name class SnapshotExists(DatasetExists): message = "Snapshot already exists" def __init__(self, name): self.name = name class SnapshotNotFound(DatasetNotFound): message = "Snapshot not found" def __init__(self, name): self.name = name class SnapshotNotLatest(ZFSError): errno = errno.EEXIST message = "Snapshot is not the latest" def __init__(self, name): self.name = name class SnapshotIsCloned(ZFSError): errno = errno.EEXIST message = "Snapshot is cloned" def __init__(self, name): self.name = name class SnapshotIsHeld(ZFSError): errno = errno.EBUSY message = "Snapshot is held" def __init__(self, name): self.name = name class DuplicateSnapshots(ZFSError): errno = errno.EXDEV message = "Requested multiple snapshots of the same filesystem" def __init__(self, name): self.name = name class SnapshotFailure(MultipleOperationsFailure): message = "Creation of snapshot(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(SnapshotFailure, self).__init__(errors, suppressed_count) class SnapshotDestructionFailure(MultipleOperationsFailure): message = "Destruction of snapshot(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(SnapshotDestructionFailure, self).__init__( errors, suppressed_count) class BookmarkExists(ZFSError): errno = errno.EEXIST message = "Bookmark already exists" def __init__(self, name): self.name = name class BookmarkNotFound(ZFSError): errno = errno.ENOENT message = "Bookmark not found" def __init__(self, name): self.name = name class BookmarkMismatch(ZFSError): errno = errno.EINVAL message = "Bookmark is not in snapshot's filesystem" def __init__(self, name): self.name = name class BookmarkNotSupported(ZFSError): errno = errno.ENOTSUP message = "Bookmark feature is not supported" def __init__(self, name): self.name
= name class BookmarkFailure(MultipleOperationsFailure): message = "Creation of bookmark(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(BookmarkFailure, self).__init__(errors, suppressed_count) class BookmarkDestructionFailure(MultipleOperationsFailure): message = "Destruction of bookmark(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(BookmarkDestructionFailure, self).__init__( errors, suppressed_count) class BadHoldCleanupFD(ZFSError): errno = errno.EBADF message = "Bad file descriptor as cleanup file descriptor" class HoldExists(ZFSError): errno = errno.EEXIST message = "Hold with a given tag already exists on snapshot" def __init__(self, name): self.name = name class HoldNotFound(ZFSError): errno = errno.ENOENT message = "Hold with a given tag does not exist on snapshot" def __init__(self, name): self.name = name class HoldFailure(MultipleOperationsFailure): message = "Placement of hold(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(HoldFailure, self).__init__(errors, suppressed_count) class HoldReleaseFailure(MultipleOperationsFailure): message = "Release of hold(s) failed for one or more reasons" def __init__(self, errors, suppressed_count): super(HoldReleaseFailure, self).__init__(errors, suppressed_count) class SnapshotMismatch(ZFSError): errno = errno.ENODEV message = "Snapshot is not descendant of source snapshot" def __init__(self, name): self.name = name class StreamMismatch(ZFSError): errno = errno.ENODEV message = "Stream is not applicable to destination dataset" def __init__(self, name): self.name = name class DestinationModified(ZFSError): errno = errno.ETXTBSY message = "Destination dataset has modifications that can not be undone" def __init__(self, name): self.name = name class BadStream(ZFSError): errno = errno.EBADE message = "Bad backup stream" class StreamFeatureNotSupported(ZFSError): errno = errno.ENOTSUP message = "Stream contains unsupported feature" class UnknownStreamFeature(ZFSError): errno = errno.ENOTSUP message = "Unknown feature requested for stream" class StreamFeatureInvalid(ZFSError): errno = errno.EINVAL message = "Kernel modules must be upgraded to receive this stream" class StreamFeatureIncompatible(ZFSError): errno = errno.EINVAL message = "Incompatible embedded feature with encrypted receive" class ReceivePropertyFailure(MultipleOperationsFailure): message = "Receiving of properties failed for one or more reasons" def __init__(self, errors, suppressed_count): super(ReceivePropertyFailure, self).__init__(errors, suppressed_count) class StreamIOError(ZFSError): message = "I/O error while writing or reading stream" def __init__(self, errno): self.errno = errno class ZIOError(ZFSError): errno = errno.EIO message = "I/O error" def __init__(self, name): self.name = name class NoSpace(ZFSError): errno = errno.ENOSPC message = "No space left" def __init__(self, name): self.name = name class QuotaExceeded(ZFSError): errno = errno.EDQUOT message = "Quota exceeded" def __init__(self, name): self.name = name class DatasetBusy(ZFSError): errno = errno.EBUSY message = "Dataset is busy" def __init__(self, name): self.name = name class NameTooLong(ZFSError): errno = errno.ENAMETOOLONG message = "Dataset name is too long" def __init__(self, name): self.name = name class NameInvalid(ZFSError): errno = errno.EINVAL message = "Invalid name" def __init__(self, name): self.name = name class SnapshotNameInvalid(NameInvalid): message = "Invalid name for
snapshot" def __init__(self, name): self.name = name class FilesystemNameInvalid(NameInvalid): message = "Invalid name for filesystem or volume" def __init__(self, name): self.name = name class BookmarkNameInvalid(NameInvalid): message = "Invalid name for bookmark" def __init__(self, name): self.name = name class ReadOnlyPool(ZFSError): errno = errno.EROFS message = "Pool is read-only" def __init__(self, name): self.name = name class SuspendedPool(ZFSError): errno = errno.EAGAIN message = "Pool is suspended" def __init__(self, name): self.name = name class PoolNotFound(ZFSError): errno = errno.EXDEV message = "No such pool" def __init__(self, name): self.name = name class PoolsDiffer(ZFSError): errno = errno.EXDEV message = "Source and target belong to different pools" def __init__(self, name): self.name = name class FeatureNotSupported(ZFSError): errno = errno.ENOTSUP message = "Feature is not supported in this version" def __init__(self, name): self.name = name class PropertyNotSupported(ZFSError): errno = errno.ENOTSUP message = "Property is not supported in this version" def __init__(self, name): self.name = name class PropertyInvalid(ZFSError): errno = errno.EINVAL message = "Invalid property or property value" def __init__(self, name): self.name = name class DatasetTypeInvalid(ZFSError): errno = errno.EINVAL message = "Specified dataset type is unknown" def __init__(self, name): self.name = name class UnknownCryptCommand(ZFSError): errno = errno.EINVAL message = "Specified crypt command is invalid" def __init__(self, name): self.name = name class EncryptionKeyNotLoaded(ZFSError): errno = errno.EACCES message = "Encryption key is not currently loaded" class EncryptionKeyAlreadyLoaded(ZFSError): errno = errno.EEXIST message = "Encryption key is already loaded" class EncryptionKeyInvalid(ZFSError): errno = errno.EACCES message = "Incorrect encryption key provided" class ZCPError(ZFSError): errno = None message = None class ZCPSyntaxError(ZCPError): errno = errno.EINVAL message = "Channel program contains syntax errors" def __init__(self, details): self.details = details class ZCPRuntimeError(ZCPError): errno = errno.ECHRNG message = "Channel programs encountered a runtime error" def __init__(self, details): self.details = details class ZCPLimitInvalid(ZCPError): errno = errno.EINVAL message = "Channel program called with invalid limits" class ZCPTimeout(ZCPError): errno = errno.ETIME message = "Channel program timed out" class ZCPSpaceError(ZCPError): errno = errno.ENOSPC message = "Channel program exhausted the memory limit" class ZCPMemoryError(ZCPError): errno = errno.ENOMEM message = "Channel program return value too large" class ZCPPermissionError(ZCPError): errno = errno.EPERM message = "Channel programs must be run as root" class CheckpointExists(ZFSError): errno = ZFS_ERR_CHECKPOINT_EXISTS message = "Pool already has a checkpoint" class CheckpointNotFound(ZFSError): errno = ZFS_ERR_NO_CHECKPOINT message = "Pool does not have a checkpoint" class CheckpointDiscarding(ZFSError): errno = ZFS_ERR_DISCARDING_CHECKPOINT message = "Pool checkpoint is being discarded" class DeviceRemovalRunning(ZFSError): errno = ZFS_ERR_DEVRM_IN_PROGRESS message = "A vdev is currently being removed" class DeviceTooBig(ZFSError): errno = ZFS_ERR_VDEV_TOO_BIG message = "One or more top-level vdevs exceed the maximum vdev size" # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py index 
97fd36ce7236..25f20a4aeebc 100644 --- a/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py +++ b/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py @@ -1,4308 +1,4370 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Tests for `libzfs_core` operations. These are mostly functional and conformance tests that validate that the operations produce expected effects or fail with expected exceptions. """ from __future__ import absolute_import, division, print_function import unittest import contextlib import errno import filecmp import os import platform import resource import shutil import stat import subprocess import sys import tempfile import time import uuid import itertools import zlib from .. import _libzfs_core as lzc from .. import exceptions as lzc_exc from .._nvlist import packed_nvlist_out def _print(*args): for arg in args: print(arg, end=' ') print() @contextlib.contextmanager def suppress(exceptions=None): try: yield except BaseException as e: if exceptions is None or isinstance(e, exceptions): pass else: raise @contextlib.contextmanager def _zfs_mount(fs): mntdir = tempfile.mkdtemp() if platform.system() == 'SunOS': mount_cmd = ['mount', '-F', 'zfs', fs, mntdir] else: mount_cmd = ['mount', '-t', 'zfs', fs, mntdir] unmount_cmd = ['umount', '-f', mntdir] try: subprocess.check_output(mount_cmd, stderr=subprocess.STDOUT) try: yield mntdir finally: with suppress(): subprocess.check_output(unmount_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: print('failed to mount %s @ %s : %s' % (fs, mntdir, e.output)) raise finally: os.rmdir(mntdir) # XXX On illumos it is impossible to explicitly mount a snapshot. # So, either we need to implicitly mount it using .zfs/snapshot/ # or we need to create a clone and mount it readonly (and discard # it afterwards). # At the moment the former approach is implemented. # This dictionary is used to keep track of mounted filesystems # (not snapshots), so that we do not try to mount a filesystem # more than once in the case more than one snapshot of the # filesystem is accessed from the same context or the filesystem # and its snapshot are accessed. 
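# The keys are filesystem names and the values are the temporary
# directories that the filesystems are currently mounted on.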
_mnttab = {} @contextlib.contextmanager def _illumos_mount_fs(fs): if fs in _mnttab: yield _mnttab[fs] else: with _zfs_mount(fs) as mntdir: _mnttab[fs] = mntdir try: yield mntdir finally: _mnttab.pop(fs, None) @contextlib.contextmanager def _illumos_mount_snap(fs): (base, snap) = fs.split('@', 1) with _illumos_mount_fs(base) as mntdir: yield os.path.join(mntdir, '.zfs', 'snapshot', snap) @contextlib.contextmanager def _zfs_mount_illumos(fs): if '@' not in fs: with _illumos_mount_fs(fs) as mntdir: yield mntdir else: with _illumos_mount_snap(fs) as mntdir: yield mntdir if platform.system() == 'SunOS': zfs_mount = _zfs_mount_illumos else: zfs_mount = _zfs_mount @contextlib.contextmanager def cleanup_fd(): fd = os.open('/dev/zfs', os.O_EXCL) try: yield fd finally: os.close(fd) @contextlib.contextmanager def os_open(name, mode): fd = os.open(name, mode) try: yield fd finally: os.close(fd) @contextlib.contextmanager def dev_null(): with os_open('/dev/null', os.O_WRONLY) as fd: yield fd @contextlib.contextmanager def dev_zero(): with os_open('/dev/zero', os.O_RDONLY) as fd: yield fd @contextlib.contextmanager def temp_file_in_fs(fs): with zfs_mount(fs) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() yield f.name def make_snapshots(fs, before, modified, after): def _maybe_snap(snap): if snap is not None: if not snap.startswith(fs): snap = fs + b'@' + snap lzc.lzc_snapshot([snap]) return snap before = _maybe_snap(before) with temp_file_in_fs(fs) as name: modified = _maybe_snap(modified) after = _maybe_snap(after) return (name, (before, modified, after)) @contextlib.contextmanager def streams(fs, first, second): (filename, snaps) = make_snapshots(fs, None, first, second) - with tempfile.TemporaryFile(suffix='.ztream') as full: + with tempfile.TemporaryFile(suffix='.zstream') as full: lzc.lzc_send(snaps[1], None, full.fileno()) full.seek(0) if snaps[2] is not None: - with tempfile.TemporaryFile(suffix='.ztream') as incremental: + with tempfile.TemporaryFile(suffix='.zstream') as incremental: lzc.lzc_send(snaps[2], snaps[1], incremental.fileno()) incremental.seek(0) yield (filename, (full, incremental)) else: yield (filename, (full, None)) @contextlib.contextmanager def encrypted_filesystem(): fs = ZFSTest.pool.getFilesystem(b"encrypted") name = fs.getName() filename = None key = os.urandom(lzc.WRAPPING_KEY_LEN) with tempfile.NamedTemporaryFile() as f: filename = "file://" + f.name props = { b"encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, b"keylocation": filename.encode(), b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, } lzc.lzc_create(name, 'zfs', props=props, key=key) yield (name, key) def runtimeSkipIf(check_method, message): def _decorator(f): def _f(_self, *args, **kwargs): if check_method(_self): return _self.skipTest(message) else: return f(_self, *args, **kwargs) _f.__name__ = f.__name__ return _f return _decorator def skipIfFeatureAvailable(feature, message): return runtimeSkipIf( lambda _self: _self.__class__.pool.isPoolFeatureAvailable(feature), message) def skipUnlessFeatureEnabled(feature, message): return runtimeSkipIf( lambda _self: not _self.__class__.pool.isPoolFeatureEnabled(feature), message) def skipUnlessBookmarksSupported(f): return skipUnlessFeatureEnabled( 'bookmarks', 'bookmarks are not enabled')(f) def snap_always_unmounted_before_destruction(): # Apparently ZoL automatically unmounts the snapshot # only if it is mounted at its default .zfs/snapshot # mountpoint. 
return ( platform.system() != 'Linux', 'snapshot is not auto-unmounted') def illumos_bug_6379(): # zfs_ioc_hold() panics on a bad cleanup fd return ( platform.system() == 'SunOS', 'see https://www.illumos.org/issues/6379') def needs_support(function): return unittest.skipUnless( lzc.is_supported(function), '{} not available'.format(function.__name__)) class ZFSTest(unittest.TestCase): POOL_FILE_SIZE = 128 * 1024 * 1024 FILESYSTEMS = [b'fs1', b'fs2', b'fs1/fs'] pool = None misc_pool = None readonly_pool = None @classmethod def setUpClass(cls): try: cls.pool = _TempPool(filesystems=cls.FILESYSTEMS) cls.misc_pool = _TempPool() cls.readonly_pool = _TempPool( filesystems=cls.FILESYSTEMS, readonly=True) cls.pools = [cls.pool, cls.misc_pool, cls.readonly_pool] except Exception: cls._cleanUp() raise @classmethod def tearDownClass(cls): cls._cleanUp() @classmethod def _cleanUp(cls): for pool in [cls.pool, cls.misc_pool, cls.readonly_pool]: if pool is not None: pool.cleanUp() def setUp(self): pass def tearDown(self): for pool in ZFSTest.pools: pool.reset() def assertExists(self, name): self.assertTrue( lzc.lzc_exists(name), 'ZFS dataset %s does not exist' % (name, )) def assertNotExists(self, name): self.assertFalse( lzc.lzc_exists(name), 'ZFS dataset %s exists' % (name, )) def test_exists(self): self.assertExists(ZFSTest.pool.makeName()) def test_exists_in_ro_pool(self): self.assertExists(ZFSTest.readonly_pool.makeName()) def test_exists_failure(self): self.assertNotExists(ZFSTest.pool.makeName(b'nonexistent')) def test_create_fs(self): name = ZFSTest.pool.makeName(b"fs1/fs/test1") lzc.lzc_create(name) self.assertExists(name) def test_create_zvol(self): name = ZFSTest.pool.makeName(b"fs1/fs/zvol") props = {b"volsize": 1024 * 1024} lzc.lzc_create(name, ds_type='zvol', props=props) self.assertExists(name) # On Gentoo with ZFS 0.6.5.4 the volume is busy # and can not be destroyed right after its creation. # A reason for this is unknown at the moment. # Because of that the post-test clean up could fail. 
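        # Sleep briefly so that the busy volume has a chance to settle
        # before the post-test cleanup tries to destroy it.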
time.sleep(0.1) def test_create_fs_with_prop(self): name = ZFSTest.pool.makeName(b"fs1/fs/test2") props = {b"atime": 0} lzc.lzc_create(name, props=props) self.assertExists(name) def test_create_fs_wrong_ds_type(self): name = ZFSTest.pool.makeName(b"fs1/fs/test1") with self.assertRaises(lzc_exc.DatasetTypeInvalid): lzc.lzc_create(name, ds_type='wrong') - # XXX: we should have a way to raise lzc_exc.WrongParent from lzc_create() - @unittest.expectedFailure def test_create_fs_below_zvol(self): name = ZFSTest.pool.makeName(b"fs1/fs/zvol") props = {b"volsize": 1024 * 1024} lzc.lzc_create(name, ds_type='zvol', props=props) with self.assertRaises(lzc_exc.WrongParent): lzc.lzc_create(name + b'/fs') + def test_create_zvol_below_zvol(self): + name = ZFSTest.pool.makeName(b"fs1/fs/zvol") + props = {b"volsize": 1024 * 1024} + + lzc.lzc_create(name, ds_type='zvol', props=props) + with self.assertRaises(lzc_exc.WrongParent): + lzc.lzc_create(name + b'/zvol', ds_type='zvol', props=props) + def test_create_fs_duplicate(self): name = ZFSTest.pool.makeName(b"fs1/fs/test6") lzc.lzc_create(name) with self.assertRaises(lzc_exc.FilesystemExists): lzc.lzc_create(name) def test_create_fs_in_ro_pool(self): name = ZFSTest.readonly_pool.makeName(b"fs") with self.assertRaises(lzc_exc.ReadOnlyPool): lzc.lzc_create(name) def test_create_fs_without_parent(self): name = ZFSTest.pool.makeName(b"fs1/nonexistent/test") with self.assertRaises(lzc_exc.ParentNotFound): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_in_nonexistent_pool(self): name = b"no-such-pool/fs" with self.assertRaises(lzc_exc.ParentNotFound): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_with_invalid_prop(self): name = ZFSTest.pool.makeName(b"fs1/fs/test3") props = {b"BOGUS": 0} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_prop_type(self): name = ZFSTest.pool.makeName(b"fs1/fs/test4") props = {b"recordsize": b"128k"} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_prop_val(self): name = ZFSTest.pool.makeName(b"fs1/fs/test5") props = {b"atime": 20} with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_create(name, 'zfs', props) self.assertNotExists(name) def test_create_fs_with_invalid_name(self): name = ZFSTest.pool.makeName(b"@badname") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_create(name) self.assertNotExists(name) def test_create_fs_with_invalid_pool_name(self): name = b"bad!pool/fs" with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_create(name) self.assertNotExists(name) def test_create_encrypted_fs(self): fs = ZFSTest.pool.getFilesystem(b"encrypted") name = fs.getName() filename = None with tempfile.NamedTemporaryFile() as f: filename = "file://" + f.name props = { b"encryption": lzc.zio_encrypt.ZIO_CRYPT_AES_256_CCM, b"keylocation": filename.encode(), b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW, } key = os.urandom(lzc.WRAPPING_KEY_LEN) lzc.lzc_create(name, 'zfs', props=props, key=key) self.assertEqual(fs.getProperty("encryption"), b"aes-256-ccm") self.assertEqual(fs.getProperty("encryptionroot"), name) self.assertEqual(fs.getProperty("keylocation"), filename.encode()) self.assertEqual(fs.getProperty("keyformat"), b"raw") def test_snapshot(self): snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] lzc.lzc_snapshot(snaps) self.assertExists(snapname) def test_snapshot_empty_list(self): 
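        # Passing an empty list of snapshots must succeed as a no-op.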
lzc.lzc_snapshot([]) def test_snapshot_user_props(self): snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] props = {b"user:foo": b"bar"} lzc.lzc_snapshot(snaps, props) self.assertExists(snapname) def test_snapshot_invalid_props(self): snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] props = {b"foo": b"bar"} with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps, props) self.assertEqual(len(ctx.exception.errors), len(snaps)) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PropertyInvalid) self.assertNotExists(snapname) def test_snapshot_ro_pool(self): snapname1 = ZFSTest.readonly_pool.makeName(b"@snap") snapname2 = ZFSTest.readonly_pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # NB: one common error is reported. self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.ReadOnlyPool) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_snapshot_nonexistent_pool(self): snapname = b"no-such-pool@snap" snaps = [snapname] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) def test_snapshot_nonexistent_fs(self): snapname = ZFSTest.pool.makeName(b"nonexistent@snap") snaps = [snapname] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) def test_snapshot_nonexistent_and_existent_fs(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_multiple_snapshots_nonexistent_fs(self): snapname1 = ZFSTest.pool.makeName(b"nonexistent@snap1") snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap2") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # XXX two errors should be reported but alas self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.DuplicateSnapshots) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_multiple_snapshots_multiple_nonexistent_fs(self): snapname1 = ZFSTest.pool.makeName(b"nonexistent1@snap") snapname2 = ZFSTest.pool.makeName(b"nonexistent2@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.FilesystemNotFound) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_snapshot_already_exists(self): snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotExists) def test_multiple_snapshots_for_same_fs(self): snapname1 = 
ZFSTest.pool.makeName(b"@snap1") snapname2 = ZFSTest.pool.makeName(b"@snap2") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.DuplicateSnapshots) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_multiple_snapshots(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) self.assertExists(snapname1) self.assertExists(snapname2) def test_multiple_existing_snapshots(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotExists) def test_multiple_new_and_existing_snapshots(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.pool.makeName(b"fs1@snap") snapname3 = ZFSTest.pool.makeName(b"fs2@snap") snaps = [snapname1, snapname2] more_snaps = snaps + [snapname3] lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(more_snaps) self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotExists) self.assertNotExists(snapname3) def test_snapshot_multiple_errors(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.pool.makeName(b"nonexistent@snap") snapname3 = ZFSTest.pool.makeName(b"fs1@snap") snaps = [snapname1] more_snaps = [snapname1, snapname2, snapname3] # create 'snapname1' snapshot lzc.lzc_snapshot(snaps) # attempt to create 3 snapshots: # 1. duplicate snapshot name # 2. refers to filesystem that doesn't exist # 3. could have succeeded if not for 1 and 2 with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(more_snaps) # It seems that FilesystemNotFound overrides the other error, # but it doesn't have to. self.assertGreater(len(ctx.exception.errors), 0) for e in ctx.exception.errors: self.assertIsInstance( e, (lzc_exc.SnapshotExists, lzc_exc.FilesystemNotFound)) self.assertNotExists(snapname2) self.assertNotExists(snapname3) def test_snapshot_different_pools(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.misc_pool.makeName(b"@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # NB: one common error is reported. self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_snapshot_different_pools_ro_pool(self): snapname1 = ZFSTest.pool.makeName(b"@snap") snapname2 = ZFSTest.readonly_pool.makeName(b"@snap") snaps = [snapname1, snapname2] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # NB: one common error is reported. self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: # NB: depending on whether the first attempted snapshot is # for the read-only pool a different error is reported. 
self.assertIsInstance( e, (lzc_exc.PoolsDiffer, lzc_exc.ReadOnlyPool)) self.assertNotExists(snapname1) self.assertNotExists(snapname2) def test_snapshot_invalid_name(self): snapname1 = ZFSTest.pool.makeName(b"@bad&name") snapname2 = ZFSTest.pool.makeName(b"fs1@bad*name") snapname3 = ZFSTest.pool.makeName(b"fs2@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # NB: one common error is reported. self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertIsNone(e.name) def test_snapshot_too_long_complete_name(self): snapname1 = ZFSTest.pool.makeTooLongName(b"fs1@") snapname2 = ZFSTest.pool.makeTooLongName(b"fs2@") snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) self.assertEqual(len(ctx.exception.errors), 2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertIsNotNone(e.name) def test_snapshot_too_long_snap_name(self): snapname1 = ZFSTest.pool.makeTooLongComponent(b"fs1@") snapname2 = ZFSTest.pool.makeTooLongComponent(b"fs2@") snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotFailure) as ctx: lzc.lzc_snapshot(snaps) # NB: one common error is reported. self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertIsNone(e.name) def test_destroy_nonexistent_snapshot(self): lzc.lzc_destroy_snaps([ZFSTest.pool.makeName(b"@nonexistent")], False) lzc.lzc_destroy_snaps([ZFSTest.pool.makeName(b"@nonexistent")], True) def test_destroy_snapshot_of_nonexistent_pool(self): with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps([b"no-such-pool@snap"], False) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolNotFound) with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps([b"no-such-pool@snap"], True) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolNotFound) # NB: note the difference from the nonexistent pool test. def test_destroy_snapshot_of_nonexistent_fs(self): lzc.lzc_destroy_snaps( [ZFSTest.pool.makeName(b"nonexistent@snap")], False) lzc.lzc_destroy_snaps( [ZFSTest.pool.makeName(b"nonexistent@snap")], True) # Apparently the name is not checked for validity. @unittest.expectedFailure def test_destroy_invalid_snap_name(self): with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps( [ZFSTest.pool.makeName(b"@non$&*existent")], False) with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps( [ZFSTest.pool.makeName(b"@non$&*existent")], True) # Apparently the full name is not checked for length. 
@unittest.expectedFailure def test_destroy_too_long_full_snap_name(self): snapname1 = ZFSTest.pool.makeTooLongName(b"fs1@") snaps = [snapname1] with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps(snaps, False) with self.assertRaises(lzc_exc.SnapshotDestructionFailure): lzc.lzc_destroy_snaps(snaps, True) def test_destroy_too_long_short_snap_name(self): snapname1 = ZFSTest.pool.makeTooLongComponent(b"fs1@") snapname2 = ZFSTest.pool.makeTooLongComponent(b"fs2@") snapname3 = ZFSTest.pool.makeName(b"@snap") snaps = [snapname1, snapname2, snapname3] with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps(snaps, False) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) @unittest.skipUnless(*snap_always_unmounted_before_destruction()) def test_destroy_mounted_snap(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with zfs_mount(snap): # the snapshot should be force-unmounted lzc.lzc_destroy_snaps([snap], defer=False) self.assertNotExists(snap) def test_clone(self): # NB: note the special name for the snapshot. # Since currently we cannot destroy filesystems, # it would be impossible to destroy the snapshot, # so there is no point in attempting to clean it up. snapname = ZFSTest.pool.makeName(b"fs2@origin1") name = ZFSTest.pool.makeName(b"fs1/fs/clone1") lzc.lzc_snapshot([snapname]) lzc.lzc_clone(name, snapname) self.assertExists(name) def test_clone_nonexistent_snapshot(self): snapname = ZFSTest.pool.makeName(b"fs2@nonexistent") name = ZFSTest.pool.makeName(b"fs1/fs/clone2") # XXX The error should be SnapshotNotFound # but limitations of the C interface do not allow # us to differentiate between the errors. with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_nonexistent_parent_fs(self): snapname = ZFSTest.pool.makeName(b"fs2@origin3") name = ZFSTest.pool.makeName(b"fs1/nonexistent/clone3") lzc.lzc_snapshot([snapname]) with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_to_nonexistent_pool(self): snapname = ZFSTest.pool.makeName(b"fs2@snap") name = b"no-such-pool/fs" lzc.lzc_snapshot([snapname]) with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_invalid_snap_name(self): # Use the valid name of an existing filesystem # as a snapshot name snapname = ZFSTest.pool.makeName(b"fs1/fs") name = ZFSTest.pool.makeName(b"fs2/clone") with self.assertRaises(lzc_exc.SnapshotNameInvalid): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_invalid_snap_name_2(self): # Use the valid name of a nonexistent filesystem # as a snapshot name snapname = ZFSTest.pool.makeName(b"fs1/nonexistent") name = ZFSTest.pool.makeName(b"fs2/clone") with self.assertRaises(lzc_exc.SnapshotNameInvalid): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_invalid_name(self): snapname = ZFSTest.pool.makeName(b"fs2@snap") name = ZFSTest.pool.makeName(b"fs1/bad#name") lzc.lzc_snapshot([snapname]) with self.assertRaises(lzc_exc.FilesystemNameInvalid): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_invalid_pool_name(self): snapname = ZFSTest.pool.makeName(b"fs2@snap") name = b"bad!pool/fs1" lzc.lzc_snapshot([snapname]) with self.assertRaises(lzc_exc.FilesystemNameInvalid): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def
test_clone_across_pools(self): snapname = ZFSTest.pool.makeName(b"fs2@snap") name = ZFSTest.misc_pool.makeName(b"clone1") lzc.lzc_snapshot([snapname]) with self.assertRaises(lzc_exc.PoolsDiffer): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_clone_across_pools_to_ro_pool(self): snapname = ZFSTest.pool.makeName(b"fs2@snap") name = ZFSTest.readonly_pool.makeName(b"fs1/clone1") lzc.lzc_snapshot([snapname]) # it's legal to report either of the conditions with self.assertRaises((lzc_exc.ReadOnlyPool, lzc_exc.PoolsDiffer)): lzc.lzc_clone(name, snapname) self.assertNotExists(name) def test_destroy_cloned_fs(self): snapname1 = ZFSTest.pool.makeName(b"fs2@origin4") snapname2 = ZFSTest.pool.makeName(b"fs1@snap") clonename = ZFSTest.pool.makeName(b"fs1/fs/clone4") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) lzc.lzc_clone(clonename, snapname1) with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps(snaps, False) self.assertEqual(len(ctx.exception.errors), 1) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotIsCloned) for snap in snaps: self.assertExists(snap) def test_deferred_destroy_cloned_fs(self): snapname1 = ZFSTest.pool.makeName(b"fs2@origin5") snapname2 = ZFSTest.pool.makeName(b"fs1@snap") clonename = ZFSTest.pool.makeName(b"fs1/fs/clone5") snaps = [snapname1, snapname2] lzc.lzc_snapshot(snaps) lzc.lzc_clone(clonename, snapname1) lzc.lzc_destroy_snaps(snaps, defer=True) self.assertExists(snapname1) self.assertNotExists(snapname2) def test_rollback(self): name = ZFSTest.pool.makeName(b"fs1") snapname = name + b"@snap" lzc.lzc_snapshot([snapname]) ret = lzc.lzc_rollback(name) self.assertEqual(ret, snapname) def test_rollback_2(self): name = ZFSTest.pool.makeName(b"fs1") snapname1 = name + b"@snap1" snapname2 = name + b"@snap2" lzc.lzc_snapshot([snapname1]) lzc.lzc_snapshot([snapname2]) ret = lzc.lzc_rollback(name) self.assertEqual(ret, snapname2) def test_rollback_no_snaps(self): name = ZFSTest.pool.makeName(b"fs1") with self.assertRaises(lzc_exc.SnapshotNotFound): lzc.lzc_rollback(name) def test_rollback_non_existent_fs(self): name = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_rollback(name) def test_rollback_invalid_fs_name(self): name = ZFSTest.pool.makeName(b"bad~name") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_rollback(name) def test_rollback_snap_name(self): name = ZFSTest.pool.makeName(b"fs1@snap") with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_rollback(name) def test_rollback_snap_name_2(self): name = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([name]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_rollback(name) def test_rollback_too_long_fs_name(self): name = ZFSTest.pool.makeTooLongName() with self.assertRaises(lzc_exc.NameTooLong): lzc.lzc_rollback(name) def test_rollback_to_snap_name(self): name = ZFSTest.pool.makeName(b"fs1") snap = name + b"@snap" lzc.lzc_snapshot([snap]) lzc.lzc_rollback_to(name, snap) def test_rollback_to_not_latest(self): fsname = ZFSTest.pool.makeName(b'fs1') snap1 = fsname + b"@snap1" snap2 = fsname + b"@snap2" lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotNotLatest): lzc.lzc_rollback_to(fsname, fsname + b"@snap1") @skipUnlessBookmarksSupported def test_bookmarks(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict 
= {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) lzc.lzc_bookmark(bmark_dict) @skipUnlessBookmarksSupported def test_bookmarks_2(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) lzc.lzc_bookmark(bmark_dict) lzc.lzc_destroy_snaps(snaps, defer=False) @skipUnlessBookmarksSupported def test_bookmarks_empty(self): lzc.lzc_bookmark({}) @skipUnlessBookmarksSupported def test_bookmarks_mismatching_name(self): snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] bmarks = [ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.BookmarkMismatch) @skipUnlessBookmarksSupported def test_bookmarks_invalid_name(self): snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] bmarks = [ZFSTest.pool.makeName(b'fs1#bmark!')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) @skipUnlessBookmarksSupported def test_bookmarks_invalid_name_2(self): snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] bmarks = [ZFSTest.pool.makeName(b'fs1@bmark')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) @skipUnlessBookmarksSupported def test_bookmarks_too_long_name(self): snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] bmarks = [ZFSTest.pool.makeTooLongName(b'fs1#')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) @skipUnlessBookmarksSupported def test_bookmarks_too_long_name_2(self): snaps = [ZFSTest.pool.makeName(b'fs1@snap1')] bmarks = [ZFSTest.pool.makeTooLongComponent(b'fs1#')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) @skipUnlessBookmarksSupported def test_bookmarks_mismatching_names(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs2#bmark1'), ZFSTest.pool.makeName(b'fs1#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.BookmarkMismatch) @skipUnlessBookmarksSupported def test_bookmarks_partially_mismatching_names(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs2#bmark'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.BookmarkMismatch) 
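    # A minimal sketch (an illustration added here, not used by the tests)
    # of the convention the bookmark tests follow: lzc_bookmark() takes a
    # dict that maps each new bookmark name to the snapshot it is created
    # from, and each pair must name the same filesystem, otherwise
    # BookmarkMismatch is reported.
    @staticmethod
    def _sketch_bookmark_dict(snaps):
        # b'pool/fs@snap' -> {b'pool/fs#snap': b'pool/fs@snap'}
        return {s.replace(b'@', b'#', 1): s for s in snaps}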
@skipUnlessBookmarksSupported def test_bookmarks_cross_pool(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.misc_pool.makeName(b'@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs1#bmark1'), ZFSTest.misc_pool.makeName(b'#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps[0:1]) lzc.lzc_snapshot(snaps[1:2]) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) @skipUnlessBookmarksSupported def test_bookmarks_missing_snap(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} lzc.lzc_snapshot(snaps[0:1]) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotNotFound) @skipUnlessBookmarksSupported def test_bookmarks_missing_snaps(self): snaps = [ZFSTest.pool.makeName( b'fs1@snap1'), ZFSTest.pool.makeName(b'fs2@snap1')] bmarks = [ZFSTest.pool.makeName( b'fs1#bmark1'), ZFSTest.pool.makeName(b'fs2#bmark1')] bmark_dict = {x: y for x, y in zip(bmarks, snaps)} with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotNotFound) @skipUnlessBookmarksSupported def test_bookmarks_for_the_same_snap(self): snap = ZFSTest.pool.makeName(b'fs1@snap1') bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict = {bmark1: snap, bmark2: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) @skipUnlessBookmarksSupported def test_bookmarks_for_the_same_snap_2(self): snap = ZFSTest.pool.makeName(b'fs1@snap1') bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict1 = {bmark1: snap} bmark_dict2 = {bmark2: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict1) lzc.lzc_bookmark(bmark_dict2) @skipUnlessBookmarksSupported def test_bookmarks_duplicate_name(self): snap1 = ZFSTest.pool.makeName(b'fs1@snap1') snap2 = ZFSTest.pool.makeName(b'fs1@snap2') bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict1 = {bmark: snap1} bmark_dict2 = {bmark: snap2} lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_bookmark(bmark_dict1) with self.assertRaises(lzc_exc.BookmarkFailure) as ctx: lzc.lzc_bookmark(bmark_dict2) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.BookmarkExists) @skipUnlessBookmarksSupported def test_get_bookmarks(self): snap1 = ZFSTest.pool.makeName(b'fs1@snap1') snap2 = ZFSTest.pool.makeName(b'fs1@snap2') bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark1 = ZFSTest.pool.makeName(b'fs1#bmark1') bmark2 = ZFSTest.pool.makeName(b'fs1#bmark2') bmark_dict1 = {bmark1: snap1, bmark2: snap2} bmark_dict2 = {bmark: snap2} lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_bookmark(bmark_dict1) lzc.lzc_bookmark(bmark_dict2) lzc.lzc_destroy_snaps([snap1, snap2], defer=False) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) self.assertEqual(len(bmarks), 3) for b in b'bmark', b'bmark1', b'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 0) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1'), [b'guid', b'createtxg', b'creation']) self.assertEqual(len(bmarks), 3) for b in b'bmark', b'bmark1', 
b'bmark2': self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 3) @skipUnlessBookmarksSupported def test_get_bookmarks_invalid_property(self): snap = ZFSTest.pool.makeName(b'fs1@snap') bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) bmarks = lzc.lzc_get_bookmarks( ZFSTest.pool.makeName(b'fs1'), [b'badprop']) self.assertEqual(len(bmarks), 1) for b in (b'bmark', ): self.assertIn(b, bmarks) self.assertIsInstance(bmarks[b], dict) self.assertEqual(len(bmarks[b]), 0) @skipUnlessBookmarksSupported def test_get_bookmarks_nonexistent_fs(self): with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'nonexistent')) @skipUnlessBookmarksSupported def test_destroy_bookmarks(self): snap = ZFSTest.pool.makeName(b'fs1@snap') bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) lzc.lzc_destroy_bookmarks( [bmark, ZFSTest.pool.makeName(b'fs1#nonexistent')]) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) self.assertEqual(len(bmarks), 0) @skipUnlessBookmarksSupported def test_destroy_bookmarks_invalid_name(self): snap = ZFSTest.pool.makeName(b'fs1@snap') bmark = ZFSTest.pool.makeName(b'fs1#bmark') bmark_dict = {bmark: snap} lzc.lzc_snapshot([snap]) lzc.lzc_bookmark(bmark_dict) with self.assertRaises(lzc_exc.BookmarkDestructionFailure) as ctx: lzc.lzc_destroy_bookmarks( [bmark, ZFSTest.pool.makeName(b'fs1/nonexistent')]) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) bmarks = lzc.lzc_get_bookmarks(ZFSTest.pool.makeName(b'fs1')) self.assertEqual(len(bmarks), 1) self.assertIn(b'bmark', bmarks) @skipUnlessBookmarksSupported def test_destroy_bookmark_nonexistent_fs(self): lzc.lzc_destroy_bookmarks( [ZFSTest.pool.makeName(b'nonexistent#bmark')]) @skipUnlessBookmarksSupported def test_destroy_bookmarks_empty(self): # destroying an empty list of bookmarks is a no-op lzc.lzc_destroy_bookmarks([]) def test_snaprange_space(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) space = lzc.lzc_snaprange_space(snap1, snap2) self.assertIsInstance(space, (int, int)) space = lzc.lzc_snaprange_space(snap2, snap3) self.assertIsInstance(space, (int, int)) space = lzc.lzc_snaprange_space(snap1, snap3) self.assertIsInstance(space, (int, int)) def test_snaprange_space_2(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) space = lzc.lzc_snaprange_space(snap1, snap2) self.assertGreater(space, 1024 * 1024) space = lzc.lzc_snaprange_space(snap2, snap3) self.assertGreater(space, 1024 * 1024) space = lzc.lzc_snaprange_space(snap1, snap3) self.assertGreater(space, 1024 * 1024) def test_snaprange_space_same_snap(self): snap = ZFSTest.pool.makeName(b"fs1@snap") with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) space = lzc.lzc_snaprange_space(snap, snap) self.assertGreater(space, 1024 * 1024)
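        # The file written above is exactly 1 MiB (1024 writes of 1024
        # bytes); the delta of 1024 * 1024 // 20 allows 5% slack around it.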
self.assertAlmostEqual(space, 1024 * 1024, delta=1024 * 1024 // 20) def test_snaprange_space_wrong_order(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_snaprange_space(snap2, snap1) def test_snaprange_space_unrelated(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_across_pools(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.PoolsDiffer): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_nonexistent(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_snaprange_space(snap1, snap2) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_snaprange_space(snap2, snap1) self.assertEqual(ctx.exception.name, snap1) def test_snaprange_space_invalid_name(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@sn#p") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_snaprange_space(snap1, snap2) def test_snaprange_space_not_snap(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_snaprange_space(snap1, snap2) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_snaprange_space(snap2, snap1) def test_snaprange_space_not_snap_2(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_snaprange_space(snap1, snap2) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_snaprange_space(snap2, snap1) def test_send_space(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) space = lzc.lzc_send_space(snap2, snap1) self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3, snap2) self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3, snap1) self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap1) self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap2) self.assertIsInstance(space, (int, int)) space = lzc.lzc_send_space(snap3) self.assertIsInstance(space, (int, int)) def test_send_space_2(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") snap3 = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap1]) with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) lzc.lzc_snapshot([snap3]) space = lzc.lzc_send_space(snap2, snap1) self.assertGreater(space, 1024 * 1024) space = lzc.lzc_send_space(snap3, snap2) space = lzc.lzc_send_space(snap3, snap1) space_empty = lzc.lzc_send_space(snap1) space = lzc.lzc_send_space(snap2) 
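        # A full send of snap2 must carry the ~1 MiB written before it
        # was taken.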
self.assertGreater(space, 1024 * 1024) space = lzc.lzc_send_space(snap3) self.assertEqual(space, space_empty) def test_send_space_same_snap(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send_space(snap1, snap1) def test_send_space_wrong_order(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send_space(snap1, snap2) def test_send_space_unrelated(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send_space(snap1, snap2) def test_send_space_across_pools(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with self.assertRaises(lzc_exc.PoolsDiffer): lzc.lzc_send_space(snap1, snap2) def test_send_space_nonexistent(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap1, snap2) self.assertEqual(ctx.exception.name, snap1) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap2, snap1) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send_space(snap2) self.assertEqual(ctx.exception.name, snap2) def test_send_space_invalid_name(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@sn!p") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap2, snap1) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap2) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send_space(snap1, snap2) self.assertEqual(ctx.exception.name, snap2) def test_send_space_not_snap(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send_space(snap1, snap2) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send_space(snap2, snap1) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send_space(snap2) def test_send_space_not_snap_2(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send_space(snap2, snap1) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send_space(snap2) def test_send_full(self): snap = ZFSTest.pool.makeName(b"fs1@snap") with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: estimate = lzc.lzc_send_space(snap) fd = output.fileno() lzc.lzc_send(snap, None, fd) st = os.fstat(fd) # 5%, arbitrary. 
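        # i.e. the produced stream size must fall within estimate // 20
        # of the lzc_send_space() estimate.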
self.assertAlmostEqual(st.st_size, estimate, delta=estimate // 20) def test_send_incremental(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap2]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: estimate = lzc.lzc_send_space(snap2, snap1) fd = output.fileno() lzc.lzc_send(snap2, snap1, fd) st = os.fstat(fd) # 5%, arbitrary. self.assertAlmostEqual(st.st_size, estimate, delta=estimate // 20) def test_send_flags(self): flags = ['embedded_data', 'large_blocks', 'compress', 'raw'] snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) for c in range(len(flags)): for flag in itertools.permutations(flags, c + 1): with dev_null() as fd: lzc.lzc_send(snap, None, fd, list(flag)) def test_send_unknown_flags(self): snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with dev_null() as fd: with self.assertRaises(lzc_exc.UnknownStreamFeature): lzc.lzc_send(snap, None, fd, ['embedded_data', 'UNKNOWN']) def test_send_same_snap(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") lzc.lzc_snapshot([snap1]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send(snap1, snap1, fd) def test_send_wrong_order(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send(snap1, snap2, fd) def test_send_unrelated(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.SnapshotMismatch): lzc.lzc_send(snap1, snap2, fd) def test_send_across_pools(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.misc_pool.makeName(b"@snap2") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.PoolsDiffer): lzc.lzc_send(snap1, snap2, fd) def test_send_nonexistent(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") lzc.lzc_snapshot([snap1]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap1, snap2, fd) self.assertEqual(ctx.exception.name, snap1) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap2, snap1, fd) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.SnapshotNotFound) as ctx: lzc.lzc_send(snap2, None, fd) self.assertEqual(ctx.exception.name, snap2) def test_send_invalid_name(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = 
ZFSTest.pool.makeName(b"fs1@sn!p") lzc.lzc_snapshot([snap1]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap2, snap1, fd) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap2, None, fd) self.assertEqual(ctx.exception.name, snap2) with self.assertRaises(lzc_exc.NameInvalid) as ctx: lzc.lzc_send(snap1, snap2, fd) self.assertEqual(ctx.exception.name, snap2) # XXX Although undocumented the API allows to create an incremental # or full stream for a filesystem as if a temporary unnamed snapshot # is taken at some time after the call is made and before the stream # starts being produced. def test_send_filesystem(self): snap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() lzc.lzc_send(fs, snap, fd) lzc.lzc_send(fs, None, fd) def test_send_from_filesystem(self): snap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.makeName(b"fs1") lzc.lzc_snapshot([snap]) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send(snap, fs, fd) @skipUnlessBookmarksSupported def test_send_bookmark(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") bmark = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_bookmark({bmark: snap2}) lzc.lzc_destroy_snaps([snap2], defer=False) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send(bmark, snap1, fd) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_send(bmark, None, fd) @skipUnlessBookmarksSupported def test_send_from_bookmark(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") bmark = ZFSTest.pool.makeName(b"fs1#bmark") lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) lzc.lzc_bookmark({bmark: snap1}) lzc.lzc_destroy_snaps([snap1], defer=False) - with tempfile.TemporaryFile(suffix='.ztream') as output: + with tempfile.TemporaryFile(suffix='.zstream') as output: fd = output.fileno() lzc.lzc_send(snap2, bmark, fd) def test_send_bad_fd(self): snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: bad_fd = tmp.fileno() with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, bad_fd) self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_2(self): snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, -2) self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_bad_fd_3(self): snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: bad_fd = tmp.fileno() (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE) bad_fd = hard + 1 with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, bad_fd) self.assertEqual(ctx.exception.errno, errno.EBADF) def test_send_to_broken_pipe(self): snap = 
ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) if sys.version_info < (3, 0): proc = subprocess.Popen(['true'], stdin=subprocess.PIPE) proc.wait() with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, proc.stdin.fileno()) self.assertEqual(ctx.exception.errno, errno.EPIPE) else: with subprocess.Popen(['true'], stdin=subprocess.PIPE) as proc: proc.wait() with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, proc.stdin.fileno()) self.assertEqual(ctx.exception.errno, errno.EPIPE) def test_send_to_broken_pipe_2(self): snap = ZFSTest.pool.makeName(b"fs1@snap") with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: with tempfile.NamedTemporaryFile(dir=mntdir) as f: for i in range(1024): f.write(b'x' * 1024) f.flush() lzc.lzc_snapshot([snap]) if sys.version_info < (3, 0): p = subprocess.Popen(['sleep', '2'], stdin=subprocess.PIPE) with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, p.stdin.fileno()) self.assertTrue(ctx.exception.errno == errno.EPIPE or ctx.exception.errno == errno.EINTR) else: with subprocess.Popen(['sleep', '2'], stdin=subprocess.PIPE) as p: with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, p.stdin.fileno()) self.assertTrue(ctx.exception.errno == errno.EPIPE or ctx.exception.errno == errno.EINTR) def test_send_to_ro_file(self): snap = ZFSTest.pool.makeName(b"fs1@snap") lzc.lzc_snapshot([snap]) with tempfile.NamedTemporaryFile( - suffix='.ztream', delete=False) as output: + suffix='.zstream', delete=False) as output: # tempfile always opens a temporary file in read-write mode # regardless of the specified mode, so we have to open it again. os.chmod(output.name, stat.S_IRUSR) fd = os.open(output.name, os.O_RDONLY) with self.assertRaises(lzc_exc.StreamIOError) as ctx: lzc.lzc_send(snap, None, fd) os.close(fd) self.assertEqual(ctx.exception.errno, errno.EBADF) def test_recv_full(self): src = ZFSTest.pool.makeName(b"fs1@snap") dst = ZFSTest.pool.makeName(b"fs2/received-1@snap") with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst, stream.fileno()) name = os.path.basename(name) with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2: self.assertTrue( filecmp.cmp( os.path.join(mnt1, name), os.path.join(mnt2, name), False)) def test_recv_incremental(self): src1 = ZFSTest.pool.makeName(b"fs1@snap1") src2 = ZFSTest.pool.makeName(b"fs1@snap2") dst1 = ZFSTest.pool.makeName(b"fs2/received-2@snap1") dst2 = ZFSTest.pool.makeName(b"fs2/received-2@snap2") lzc.lzc_snapshot([src1]) with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src2]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src1, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst1, stream.fileno()) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src2, src1, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst2, stream.fileno()) name = os.path.basename(name) with zfs_mount(src2) as mnt1, zfs_mount(dst2) as mnt2: self.assertTrue( filecmp.cmp( os.path.join(mnt1, name), os.path.join(mnt2, name), False)) # This test case fails unless unless a patch from # https://clusterhq.atlassian.net/browse/ZFS-20 # is 
applied to libzfs_core, otherwise it succeeds. @unittest.skip("fails with unpatched libzfs_core") def test_recv_without_explicit_snap_name(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-100") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dstfs, full.fileno()) lzc.lzc_receive(dstfs, incr.fileno()) self.assertExists(dst1) self.assertExists(dst2) def test_recv_clone(self): orig_src = ZFSTest.pool.makeName(b"fs2@send-origin") clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone") clone_snap = clone + b"@snap" orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin@snap") clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone@snap") lzc.lzc_snapshot([orig_src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(orig_src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(orig_dst, stream.fileno()) lzc.lzc_clone(clone, orig_src) lzc.lzc_snapshot([clone_snap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(clone_snap, orig_src, stream.fileno()) stream.seek(0) lzc.lzc_receive(clone_dst, stream.fileno(), origin=orig_dst) def test_recv_full_already_existing_empty_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-3") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(( lzc_exc.DestinationModified, lzc_exc.DatasetExists)): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_into_root_empty_pool(self): empty_pool = None try: srcfs = ZFSTest.pool.makeName(b"fs1") empty_pool = _TempPool() dst = empty_pool.makeName(b'@snap') with streams(srcfs, b"snap", None) as (_, (stream, _)): with self.assertRaises(( lzc_exc.DestinationModified, lzc_exc.DatasetExists)): lzc.lzc_receive(dst, stream.fileno()) finally: if empty_pool is not None: empty_pool.cleanUp() def test_recv_full_into_ro_pool(self): srcfs = ZFSTest.pool.makeName(b"fs1") dst = ZFSTest.readonly_pool.makeName(b'fs2/received@snap') with streams(srcfs, b"snap", None) as (_, (stream, _)): with self.assertRaises(lzc_exc.ReadOnlyPool): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_already_existing_modified_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-5") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(( lzc_exc.DestinationModified, lzc_exc.DatasetExists)): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_already_existing_with_snapshots(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-4") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) lzc.lzc_snapshot([dstfs + b"@snap1"]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with 
tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(( lzc_exc.StreamMismatch, lzc_exc.DatasetExists)): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_already_existing_snapshot(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-6") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) lzc.lzc_snapshot([dst]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.DatasetExists): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_missing_parent_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dst = ZFSTest.pool.makeName(b"fs2/nonexistent/fs@snap") with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_receive(dst, stream.fileno()) def test_recv_full_but_specify_origin(self): srcfs = ZFSTest.pool.makeName(b"fs1") src = srcfs + b"@snap" dstfs = ZFSTest.pool.makeName(b"fs2/received-30") dst = dstfs + b'@snap' origin1 = ZFSTest.pool.makeName(b"fs2@snap1") origin2 = ZFSTest.pool.makeName(b"fs2@snap2") lzc.lzc_snapshot([origin1]) with streams(srcfs, src, None) as (_, (stream, _)): lzc.lzc_receive(dst, stream.fileno(), origin=origin1) origin = ZFSTest.pool.getFilesystem( b"fs2/received-30").getProperty('origin') self.assertEqual(origin, origin1) stream.seek(0) # because origin snap does not exist can't receive as a clone of it with self.assertRaises(( lzc_exc.DatasetNotFound, lzc_exc.BadStream)): lzc.lzc_receive(dst, stream.fileno(), origin=origin2) def test_recv_full_existing_empty_fs_and_origin(self): srcfs = ZFSTest.pool.makeName(b"fs1") src = srcfs + b"@snap" dstfs = ZFSTest.pool.makeName(b"fs2/received-31") dst = dstfs + b'@snap' origin = dstfs + b'@dummy' lzc.lzc_create(dstfs) with streams(srcfs, src, None) as (_, (stream, _)): # because the destination fs already exists and has no snaps with self.assertRaises(( lzc_exc.DestinationModified, lzc_exc.DatasetExists, lzc_exc.BadStream)): lzc.lzc_receive(dst, stream.fileno(), origin=origin) lzc.lzc_snapshot([origin]) stream.seek(0) # because the destination fs already exists and has the snap with self.assertRaises(( lzc_exc.StreamMismatch, lzc_exc.DatasetExists, lzc_exc.BadStream)): lzc.lzc_receive(dst, stream.fileno(), origin=origin) def test_recv_incremental_mounted_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-7") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with zfs_mount(dstfs): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_modified_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-15") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): with self.assertRaises(lzc_exc.DestinationModified): lzc.lzc_receive(dst2, incr.fileno()) def 
test_recv_incremental_snapname_used(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-8") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_snapshot([dst2]) with self.assertRaises(lzc_exc.DatasetExists): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_more_recent_snap_with_no_changes(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-9") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_snapshot([dst_snap]) lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_non_clone_but_set_origin(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-20") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_snapshot([dst_snap]) # because we cannot receive an incremental and set origin on a non-clone with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(dst2, incr.fileno(), origin=dst1) def test_recv_incremental_non_clone_but_set_random_origin(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-21") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_snapshot([dst_snap]) # because the origin snap does not exist we can't receive as a clone of it with self.assertRaises(( lzc_exc.DatasetNotFound, lzc_exc.BadStream)): lzc.lzc_receive( dst2, incr.fileno(), origin=ZFSTest.pool.makeName(b"fs2/fs@snap")) def test_recv_incremental_more_recent_snap(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-10") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): lzc.lzc_snapshot([dst_snap]) with self.assertRaises(lzc_exc.DestinationModified): lzc.lzc_receive(dst2, incr.fileno()) def test_recv_incremental_duplicate(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-11") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_receive(dst2, incr.fileno()) incr.seek(0) with self.assertRaises(lzc_exc.DestinationModified): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_unrelated_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-12") dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): lzc.lzc_create(dstfs) with self.assertRaises(lzc_exc.StreamMismatch): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_nonexistent_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2"
dstfs = ZFSTest.pool.makeName(b"fs2/received-13") dst_snap = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_receive(dst_snap, incr.fileno()) def test_recv_incremental_same_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" src_snap = srcfs + b'@snap' with streams(srcfs, src1, src2) as (_, (_, incr)): with self.assertRaises(lzc_exc.DestinationModified): lzc.lzc_receive(src_snap, incr.fileno()) def test_recv_clone_without_specifying_origin(self): orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-2") clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-2") clone_snap = clone + b"@snap" orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-2@snap") clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-2@snap") lzc.lzc_snapshot([orig_src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(orig_src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(orig_dst, stream.fileno()) lzc.lzc_clone(clone, orig_src) lzc.lzc_snapshot([clone_snap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(clone_snap, orig_src, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(clone_dst, stream.fileno()) def test_recv_clone_invalid_origin(self): orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-3") clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-3") clone_snap = clone + b"@snap" orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-3@snap") clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-3@snap") lzc.lzc_snapshot([orig_src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(orig_src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(orig_dst, stream.fileno()) lzc.lzc_clone(clone, orig_src) lzc.lzc_snapshot([clone_snap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(clone_snap, orig_src, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_receive( clone_dst, stream.fileno(), origin=ZFSTest.pool.makeName(b"fs1/fs")) def test_recv_clone_wrong_origin(self): orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-4") clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-4") clone_snap = clone + b"@snap" orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-4@snap") clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-4@snap") wrong_origin = ZFSTest.pool.makeName(b"fs1/fs@snap") lzc.lzc_snapshot([orig_src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(orig_src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(orig_dst, stream.fileno()) lzc.lzc_clone(clone, orig_src) lzc.lzc_snapshot([clone_snap]) lzc.lzc_snapshot([wrong_origin]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(clone_snap, orig_src, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.StreamMismatch): lzc.lzc_receive( clone_dst, stream.fileno(), origin=wrong_origin) def test_recv_clone_nonexistent_origin(self): orig_src = ZFSTest.pool.makeName(b"fs2@send-origin-5") clone = ZFSTest.pool.makeName(b"fs1/fs/send-clone-5") clone_snap = clone + b"@snap" 
orig_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-origin-5@snap") clone_dst = ZFSTest.pool.makeName(b"fs1/fs/recv-clone-5@snap") wrong_origin = ZFSTest.pool.makeName(b"fs1/fs@snap") lzc.lzc_snapshot([orig_src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(orig_src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(orig_dst, stream.fileno()) lzc.lzc_clone(clone, orig_src) lzc.lzc_snapshot([clone_snap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(clone_snap, orig_src, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_receive( clone_dst, stream.fileno(), origin=wrong_origin) def test_force_recv_full_existing_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-50") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): pass # enough to taint the fs - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_existing_modified_mounted_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-53") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with zfs_mount(dstfs) as mntdir: f = tempfile.NamedTemporaryFile(dir=mntdir, delete=False) for i in range(1024): f.write(b'x' * 1024) lzc.lzc_receive(dst, stream.fileno(), force=True) # The temporary file disappears and any access, even close(), # results in EIO. self.assertFalse(os.path.exists(f.name)) with self.assertRaises(IOError): f.close() # This test-case expects the correct behavior, but at the moment # it may fail with DatasetExists or StreamMismatch # depending on the implementation.
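# (Expected semantics, exercised by the test below: with force=True a # full receive into an existing, modified filesystem should discard the # destination's current state, including its own snapshots, before # applying the stream.)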
def test_force_recv_full_already_existing_with_snapshots(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-51") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dstfs + b"@snap1"]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_already_existing_with_same_snap(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.makeName(b"fs2/received-52") dst = dstfs + b'@snap' with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) lzc.lzc_create(dstfs) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dst]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.DatasetExists): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_full_missing_parent_fs(self): src = ZFSTest.pool.makeName(b"fs1@snap") dst = ZFSTest.pool.makeName(b"fs2/nonexistent/fs@snap") with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")): lzc.lzc_snapshot([src]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_receive(dst, stream.fileno(), force=True) def test_force_recv_incremental_modified_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-60") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_receive(dst2, incr.fileno(), force=True) def test_force_recv_incremental_modified_mounted_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-64") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with zfs_mount(dstfs) as mntdir: f = tempfile.NamedTemporaryFile(dir=mntdir, delete=False) for i in range(1024): f.write(b'x' * 1024) lzc.lzc_receive(dst2, incr.fileno(), force=True) # The temporary file disappears and any access, even close(), # results in EIO.
self.assertFalse(os.path.exists(f.name)) with self.assertRaises(IOError): f.close() def test_force_recv_incremental_modified_fs_plus_later_snap(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-61") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst3 = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dst3]) lzc.lzc_receive(dst2, incr.fileno(), force=True) self.assertExists(dst1) self.assertExists(dst2) self.assertNotExists(dst3) def test_force_recv_incremental_modified_fs_plus_same_name_snap(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-62") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dst2]) with self.assertRaises(lzc_exc.DatasetExists): lzc.lzc_receive(dst2, incr.fileno(), force=True) def test_force_recv_incremental_modified_fs_plus_held_snap(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-63") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst3 = dstfs + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dst3]) with cleanup_fd() as cfd: lzc.lzc_hold({dst3: b'tag'}, cfd) with self.assertRaises(lzc_exc.DatasetBusy): lzc.lzc_receive(dst2, incr.fileno(), force=True) self.assertExists(dst1) self.assertNotExists(dst2) self.assertExists(dst3) def test_force_recv_incremental_modified_fs_plus_cloned_snap(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-70") dst1 = dstfs + b'@snap1' dst2 = dstfs + b'@snap2' dst3 = dstfs + b'@snap' cloned = ZFSTest.pool.makeName(b"fs2/received-cloned-70") with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) with temp_file_in_fs(dstfs): pass # enough to taint the fs lzc.lzc_snapshot([dst3]) lzc.lzc_clone(cloned, dst3) with self.assertRaises(lzc_exc.DatasetExists): lzc.lzc_receive(dst2, incr.fileno(), force=True) self.assertExists(dst1) self.assertNotExists(dst2) self.assertExists(dst3) def test_recv_incremental_into_cloned_fs(self): srcfs = ZFSTest.pool.makeName(b"fs1") src1 = srcfs + b"@snap1" src2 = srcfs + b"@snap2" dstfs = ZFSTest.pool.makeName(b"fs2/received-71") dst1 = dstfs + b'@snap1' cloned = ZFSTest.pool.makeName(b"fs2/received-cloned-71") dst2 = cloned + b'@snap' with streams(srcfs, src1, src2) as (_, (full, incr)): lzc.lzc_receive(dst1, full.fileno()) lzc.lzc_clone(cloned, dst1) # test both graceful and with-force attempts with self.assertRaises(lzc_exc.StreamMismatch): lzc.lzc_receive(dst2, incr.fileno()) incr.seek(0) with self.assertRaises(lzc_exc.StreamMismatch): lzc.lzc_receive(dst2, incr.fileno(), force=True) self.assertExists(dst1) self.assertNotExists(dst2) def test_recv_with_header_full(self): src = ZFSTest.pool.makeName(b"fs1@snap") dst = ZFSTest.pool.makeName(b"fs2/received") with temp_file_in_fs(ZFSTest.pool.makeName(b"fs1")) as name: lzc.lzc_snapshot([src]) - with tempfile.TemporaryFile(suffix='.ztream') as 
stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) self.assertEqual(src, header['drr_toname']) snap = header['drr_toname'].split(b'@', 1)[1] lzc.lzc_receive_with_header( dst + b'@' + snap, stream.fileno(), c_header) name = os.path.basename(name) with zfs_mount(src) as mnt1, zfs_mount(dst) as mnt2: self.assertTrue( filecmp.cmp( os.path.join(mnt1, name), os.path.join(mnt2, name), False)) + def test_recv_fs_below_zvol(self): + send = ZFSTest.pool.makeName(b"fs1@snap") + zvol = ZFSTest.pool.makeName(b"fs1/zvol") + dest = zvol + b"/fs@snap" + props = {b"volsize": 1024 * 1024} + + lzc.lzc_snapshot([send]) + lzc.lzc_create(zvol, ds_type='zvol', props=props) + with tempfile.TemporaryFile(suffix='.zstream') as stream: + lzc.lzc_send(send, None, stream.fileno()) + stream.seek(0) + with self.assertRaises(lzc_exc.WrongParent): + lzc.lzc_receive(dest, stream.fileno()) + + def test_recv_zvol_over_fs_with_children(self): + parent = ZFSTest.pool.makeName(b"fs1") + child = parent + b"/subfs" + zvol = ZFSTest.pool.makeName(b"fs1/zvol") + send = zvol + b"@snap" + props = {b"volsize": 1024 * 1024} + + lzc.lzc_create(child) + lzc.lzc_create(zvol, ds_type='zvol', props=props) + lzc.lzc_snapshot([send]) + with tempfile.TemporaryFile(suffix='.zstream') as stream: + lzc.lzc_send(send, None, stream.fileno()) + stream.seek(0) + with self.assertRaises(lzc_exc.WrongParent): + lzc.lzc_receive(parent + b"@snap", stream.fileno(), force=True) + + def test_recv_zvol_overwrite_rootds(self): + zvol = ZFSTest.pool.makeName(b"fs1/zvol") + snap = zvol + b"@snap" + rootds = ZFSTest.pool.getRoot().getName() + props = {b"volsize": 1024 * 1024} + + lzc.lzc_create(zvol, ds_type='zvol', props=props) + lzc.lzc_snapshot([snap]) + with tempfile.TemporaryFile(suffix='.zstream') as stream: + lzc.lzc_send(snap, None, stream.fileno()) + stream.seek(0) + with self.assertRaises(lzc_exc.WrongParent): + lzc.lzc_receive(rootds + b"@snap", stream.fileno(), force=True) + def test_send_full_across_clone_branch_point(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-20", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-20") lzc.lzc_clone(clonefs, origsnap) (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, None, stream.fileno()) def test_send_incr_across_clone_branch_point(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-21", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-21") lzc.lzc_clone(clonefs, origsnap) (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, fromsnap, stream.fileno()) def test_send_resume_token_full(self): src = ZFSTest.pool.makeName(b"fs1@snap") dstfs = ZFSTest.pool.getFilesystem(b"fs2/received") dst = dstfs.getSnap() with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: for i in range(1, 10): with tempfile.NamedTemporaryFile(dir=mntdir) as f: f.write(b'x' * 1024 * i) f.flush() lzc.lzc_snapshot([src]) - with tempfile.NamedTemporaryFile(suffix='.ztream') as stream: + with tempfile.NamedTemporaryFile(suffix='.zstream') as stream:
lzc.lzc_send(src, None, stream.fileno()) stream.seek(0) stream.truncate(1024 * 3) with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive_resumable(dst, stream.fileno()) # Resume token code from zfs_send_resume_token_to_nvlist() # XXX: if used more than twice move this code into an external func # format: <version>-<cksum>-<packed-size>-<compressed payload> token = dstfs.getProperty("receive_resume_token") self.assertNotEqual(token, b'-') tokens = token.split(b'-') self.assertEqual(len(tokens), 4) version = tokens[0] packed_size = int(tokens[2], 16) compressed_nvs = tokens[3] # Validate resume token self.assertEqual(version, b'1') # ZFS_SEND_RESUME_TOKEN_VERSION if sys.version_info < (3, 0): payload = ( zlib.decompress(str(bytearray.fromhex(compressed_nvs))) ) else: payload = ( zlib.decompress(bytearray.fromhex(compressed_nvs.decode())) ) self.assertEqual(len(payload), packed_size) # Unpack resume_values = packed_nvlist_out(payload, packed_size) resumeobj = resume_values.get(b'object') resumeoff = resume_values.get(b'offset') - with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream: + with tempfile.NamedTemporaryFile(suffix='.zstream') as rstream: lzc.lzc_send_resume( src, None, rstream.fileno(), None, resumeobj, resumeoff) rstream.seek(0) lzc.lzc_receive_resumable(dst, rstream.fileno()) def test_send_resume_token_incremental(self): snap1 = ZFSTest.pool.makeName(b"fs1@snap1") snap2 = ZFSTest.pool.makeName(b"fs1@snap2") dstfs = ZFSTest.pool.getFilesystem(b"fs2/received") dst1 = dstfs.getSnap() dst2 = dstfs.getSnap() lzc.lzc_snapshot([snap1]) - with tempfile.NamedTemporaryFile(suffix='.ztream') as stream: + with tempfile.NamedTemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(snap1, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(dst1, stream.fileno()) with zfs_mount(ZFSTest.pool.makeName(b"fs1")) as mntdir: for i in range(1, 10): with tempfile.NamedTemporaryFile(dir=mntdir) as f: f.write(b'x' * 1024 * i) f.flush() lzc.lzc_snapshot([snap2]) - with tempfile.NamedTemporaryFile(suffix='.ztream') as stream: + with tempfile.NamedTemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(snap2, snap1, stream.fileno()) stream.seek(0) stream.truncate(1024 * 3) with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive_resumable(dst2, stream.fileno()) # Resume token code from zfs_send_resume_token_to_nvlist() # format: <version>-<cksum>-<packed-size>-<compressed payload> token = dstfs.getProperty("receive_resume_token") self.assertNotEqual(token, b'-') tokens = token.split(b'-') self.assertEqual(len(tokens), 4) version = tokens[0] packed_size = int(tokens[2], 16) compressed_nvs = tokens[3] # Validate resume token self.assertEqual(version, b'1') # ZFS_SEND_RESUME_TOKEN_VERSION if sys.version_info < (3, 0): payload = ( zlib.decompress(str(bytearray.fromhex(compressed_nvs))) ) else: payload = ( zlib.decompress(bytearray.fromhex(compressed_nvs.decode())) ) self.assertEqual(len(payload), packed_size) # Unpack resume_values = packed_nvlist_out(payload, packed_size) resumeobj = resume_values.get(b'object') resumeoff = resume_values.get(b'offset') - with tempfile.NamedTemporaryFile(suffix='.ztream') as rstream: + with tempfile.NamedTemporaryFile(suffix='.zstream') as rstream: lzc.lzc_send_resume( snap2, snap1, rstream.fileno(), None, resumeobj, resumeoff) rstream.seek(0) lzc.lzc_receive_resumable(dst2, rstream.fileno()) def test_recv_full_across_clone_branch_point(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-30", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-30") lzc.lzc_clone(clonefs, origsnap)
(_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-30") recvsnap = recvfs + b"@snap" - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap, stream.fileno()) def test_recv_one(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") tosnap = ZFSTest.pool.makeName(b"recv@snap1") lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_one(tosnap, stream.fileno(), c_header) def test_recv_one_size(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") tosnap = ZFSTest.pool.makeName(b"recv@snap1") lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) size = os.fstat(stream.fileno()).st_size stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) (read, _) = lzc.lzc_receive_one(tosnap, stream.fileno(), c_header) self.assertAlmostEqual(read, size, delta=read * 0.05) def test_recv_one_props(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.getFilesystem(b"recv") tosnap = fs.getName() + b"@snap1" props = { b"compression": 0x01, b"ns:prop": b"val" } lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_one(tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) self.assertEqual(fs.getProperty("compression", "received"), b"on") self.assertEqual(fs.getProperty("ns:prop", "received"), b"val") def test_recv_one_invalid_prop(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.getFilesystem(b"recv") tosnap = fs.getName() + b"@snap1" props = { b"exec": 0xff, b"atime": 0x00 } lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) with self.assertRaises(lzc_exc.ReceivePropertyFailure) as ctx: lzc.lzc_receive_one( tosnap, stream.fileno(), c_header, props=props) self.assertExists(tosnap) self.assertEqual(fs.getProperty("atime", "received"), b"off") for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PropertyInvalid) self.assertEqual(e.name, b"exec") def test_recv_with_cmdprops(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.getFilesystem(b"recv") tosnap = fs.getName() + b"@snap1" props = {} cmdprops = { b"compression": 0x01, b"ns:prop": b"val" } lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_with_cmdprops( tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) self.assertEqual(fs.getProperty("compression"), b"on") 
self.assertEqual(fs.getProperty("ns:prop"), b"val") def test_recv_with_cmdprops_and_recvprops(self): fromsnap = ZFSTest.pool.makeName(b"fs1@snap1") fs = ZFSTest.pool.getFilesystem(b"recv") tosnap = fs.getName() + b"@snap1" props = { b"atime": 0x01, b"exec": 0x00, b"ns:prop": b"abc" } cmdprops = { b"compression": 0x01, b"ns:prop": b"def", b"exec": None, } lzc.lzc_snapshot([fromsnap]) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) (header, c_header) = lzc.receive_header(stream.fileno()) lzc.lzc_receive_with_cmdprops( tosnap, stream.fileno(), c_header, props=props, cmdprops=cmdprops) self.assertExists(tosnap) self.assertEqual(fs.getProperty("atime", True), b"on") self.assertEqual(fs.getProperty("exec", True), b"off") self.assertEqual(fs.getProperty("ns:prop", True), b"abc") self.assertEqual(fs.getProperty("compression"), b"on") self.assertEqual(fs.getProperty("ns:prop"), b"def") self.assertEqual(fs.getProperty("exec"), b"on") def test_recv_incr_across_clone_branch_point_no_origin(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-32", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-32") lzc.lzc_clone(clonefs, origsnap) (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-32") recvsnap1 = recvfs + b"@snap1" recvsnap2 = recvfs + b"@snap2" - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap1, stream.fileno()) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, fromsnap, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(recvsnap2, stream.fileno()) def test_recv_incr_across_clone_branch_point(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-31", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-31") lzc.lzc_clone(clonefs, origsnap) (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) recvfs = ZFSTest.pool.makeName(b"fs1/recv-clone-31") recvsnap1 = recvfs + b"@snap1" recvsnap2 = recvfs + b"@snap2" - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap1, stream.fileno()) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, fromsnap, stream.fileno()) stream.seek(0) with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1) def test_recv_incr_across_clone_branch_point_new_fs(self): origfs = ZFSTest.pool.makeName(b"fs2") (_, (fromsnap, origsnap, _)) = make_snapshots( origfs, b"snap1", b"send-origin-33", None) clonefs = ZFSTest.pool.makeName(b"fs1/fs/send-clone-33") lzc.lzc_clone(clonefs, origsnap) (_, (_, tosnap, _)) = make_snapshots(clonefs, None, b"snap", None) recvfs1 = ZFSTest.pool.makeName(b"fs1/recv-clone-33") recvsnap1 = recvfs1 + b"@snap" recvfs2 = ZFSTest.pool.makeName(b"fs1/recv-clone-33_2") recvsnap2 = recvfs2 + b"@snap" - with tempfile.TemporaryFile(suffix='.ztream') 
as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(fromsnap, None, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap1, stream.fileno()) - with tempfile.TemporaryFile(suffix='.ztream') as stream: + with tempfile.TemporaryFile(suffix='.zstream') as stream: lzc.lzc_send(tosnap, fromsnap, stream.fileno()) stream.seek(0) lzc.lzc_receive(recvsnap2, stream.fileno(), origin=recvsnap1) def test_recv_bad_stream(self): dstfs = ZFSTest.pool.makeName(b"fs2/received") dst_snap = dstfs + b'@snap' with dev_zero() as fd: with self.assertRaises(lzc_exc.BadStream): lzc.lzc_receive(dst_snap, fd) @needs_support(lzc.lzc_promote) def test_promote(self): origfs = ZFSTest.pool.makeName(b"fs2") snap = b"@promote-snap-1" origsnap = origfs + snap lzc.lzc_snap([origsnap]) clonefs = ZFSTest.pool.makeName(b"fs1/fs/promote-clone-1") lzc.lzc_clone(clonefs, origsnap) lzc.lzc_promote(clonefs) # the snapshot now should belong to the promoted fs self.assertExists(clonefs + snap) @needs_support(lzc.lzc_promote) def test_promote_too_long_snapname(self): # origfs name must be shorter than clonefs name origfs = ZFSTest.pool.makeName(b"fs2") clonefs = ZFSTest.pool.makeName(b"fs1/fs/promote-clone-2") snapprefix = b"@promote-snap-2-" pad_len = 1 + lzc.MAXNAMELEN - len(clonefs) - len(snapprefix) snap = snapprefix + b'x' * pad_len origsnap = origfs + snap lzc.lzc_snap([origsnap]) lzc.lzc_clone(clonefs, origsnap) # This may fail on older buggy systems. # See: https://www.illumos.org/issues/5909 with self.assertRaises(lzc_exc.NameTooLong): lzc.lzc_promote(clonefs) @needs_support(lzc.lzc_promote) def test_promote_not_cloned(self): fs = ZFSTest.pool.makeName(b"fs2") with self.assertRaises(lzc_exc.NotClone): lzc.lzc_promote(fs) @unittest.skipIf(*illumos_bug_6379()) def test_hold_bad_fd(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: bad_fd = tmp.fileno() with self.assertRaises(lzc_exc.BadHoldCleanupFD): lzc.lzc_hold({snap: b'tag'}, bad_fd) @unittest.skipIf(*illumos_bug_6379()) def test_hold_bad_fd_2(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.BadHoldCleanupFD): lzc.lzc_hold({snap: b'tag'}, -2) @unittest.skipIf(*illumos_bug_6379()) def test_hold_bad_fd_3(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE) bad_fd = hard + 1 with self.assertRaises(lzc_exc.BadHoldCleanupFD): lzc.lzc_hold({snap: b'tag'}, bad_fd) @unittest.skipIf(*illumos_bug_6379()) def test_hold_wrong_fd(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with tempfile.TemporaryFile() as tmp: fd = tmp.fileno() with self.assertRaises(lzc_exc.BadHoldCleanupFD): lzc.lzc_hold({snap: b'tag'}, fd) def test_hold_fd(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag'}, fd) def test_hold_empty(self): with cleanup_fd() as fd: lzc.lzc_hold({}, fd) def test_hold_empty_2(self): lzc.lzc_hold({}) def test_hold_vs_snap_destroy(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag'}, fd) with self.assertRaises(lzc_exc.SnapshotDestructionFailure) as ctx: lzc.lzc_destroy_snaps([snap], defer=False) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.SnapshotIsHeld) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) # after automatic hold cleanup and deferred destruction 
self.assertNotExists(snap) def test_hold_many_tags(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag1'}, fd) lzc.lzc_hold({snap: b'tag2'}, fd) def test_hold_many_snaps(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with cleanup_fd() as fd: lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) def test_hold_many_with_one_missing(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap1]) with cleanup_fd() as fd: missing = lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) self.assertEqual(len(missing), 1) self.assertEqual(missing[0], snap2) def test_hold_many_with_all_missing(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.pool.getRoot().getSnap() with cleanup_fd() as fd: missing = lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) self.assertEqual(len(missing), 2) self.assertEqual(sorted(missing), sorted([snap1, snap2])) def test_hold_missing_fs(self): # XXX skip pre-created filesystems ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() snap = ZFSTest.pool.getRoot().getFilesystem().getSnap() snaps = lzc.lzc_hold({snap: b'tag'}) self.assertEqual([snap], snaps) def test_hold_missing_fs_auto_cleanup(self): # XXX skip pre-created filesystems ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() ZFSTest.pool.getRoot().getFilesystem() snap = ZFSTest.pool.getRoot().getFilesystem().getSnap() with cleanup_fd() as fd: snaps = lzc.lzc_hold({snap: b'tag'}, fd) self.assertEqual([snap], snaps) def test_hold_duplicate(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag'}, fd) with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.HoldExists) def test_hold_across_pools(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.misc_pool.getRoot().getSnap() lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap1: b'tag', snap2: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) def test_hold_too_long_tag(self): snap = ZFSTest.pool.getRoot().getSnap() tag = b't' * 256 lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: tag}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, tag) # Apparently the full snapshot name is not checked for length # and this snapshot is treated as simply missing. 
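# (Presumably getTooLongSnap(False) yields a snapshot whose short # component is valid on its own, while only the full dataset@snapshot # name exceeds MAXNAMELEN; hence the expected failure below.)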
@unittest.expectedFailure def test_hold_too_long_snap_name(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(False) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) def test_hold_too_long_snap_name_2(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) def test_hold_invalid_snap_name(self): snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) def test_hold_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() with cleanup_fd() as fd: with self.assertRaises(lzc_exc.HoldFailure) as ctx: lzc.lzc_hold({snap: b'tag'}, fd) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) def test_get_holds(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag1'}, fd) lzc.lzc_hold({snap: b'tag2'}, fd) holds = lzc.lzc_get_holds(snap) self.assertEqual(len(holds), 2) self.assertIn(b'tag1', holds) self.assertIn(b'tag2', holds) self.assertIsInstance(holds[b'tag1'], int) def test_get_holds_after_auto_cleanup(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag1'}, fd) lzc.lzc_hold({snap: b'tag2'}, fd) holds = lzc.lzc_get_holds(snap) self.assertEqual(len(holds), 0) self.assertIsInstance(holds, dict) def test_get_holds_nonexistent_snap(self): snap = ZFSTest.pool.getRoot().getSnap() with self.assertRaises(lzc_exc.SnapshotNotFound): lzc.lzc_get_holds(snap) def test_get_holds_too_long_snap_name(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(False) with self.assertRaises(lzc_exc.NameTooLong): lzc.lzc_get_holds(snap) def test_get_holds_too_long_snap_name_2(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) with self.assertRaises(lzc_exc.NameTooLong): lzc.lzc_get_holds(snap) def test_get_holds_invalid_snap_name(self): snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_get_holds(snap) # A filesystem-like snapshot name is not recognized as # an invalid name.
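# (For example, a plain filesystem name without an '@' component # passed to lzc_get_holds() should raise NameInvalid, but currently # it passes the name check; hence the expected failure below.)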
@unittest.expectedFailure def test_get_holds_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_get_holds(snap) def test_release_hold(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) lzc.lzc_hold({snap: b'tag'}) ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 0) def test_release_hold_empty(self): ret = lzc.lzc_release({}) self.assertEqual(len(ret), 0) def test_release_hold_complex(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.pool.getRoot().getSnap() snap3 = ZFSTest.pool.getRoot().getFilesystem().getSnap() lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2, snap3]) lzc.lzc_hold({snap1: b'tag1'}) lzc.lzc_hold({snap1: b'tag2'}) lzc.lzc_hold({snap2: b'tag'}) lzc.lzc_hold({snap3: b'tag1'}) lzc.lzc_hold({snap3: b'tag2'}) holds = lzc.lzc_get_holds(snap1) self.assertEqual(len(holds), 2) holds = lzc.lzc_get_holds(snap2) self.assertEqual(len(holds), 1) holds = lzc.lzc_get_holds(snap3) self.assertEqual(len(holds), 2) release = { snap1: [b'tag1', b'tag2'], snap2: [b'tag'], snap3: [b'tag2'], } ret = lzc.lzc_release(release) self.assertEqual(len(ret), 0) holds = lzc.lzc_get_holds(snap1) self.assertEqual(len(holds), 0) holds = lzc.lzc_get_holds(snap2) self.assertEqual(len(holds), 0) holds = lzc.lzc_get_holds(snap3) self.assertEqual(len(holds), 1) ret = lzc.lzc_release({snap3: [b'tag1']}) self.assertEqual(len(ret), 0) holds = lzc.lzc_get_holds(snap3) self.assertEqual(len(holds), 0) def test_release_hold_before_auto_cleanup(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag'}, fd) ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 0) def test_release_hold_and_snap_destruction(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag1'}, fd) lzc.lzc_hold({snap: b'tag2'}, fd) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) lzc.lzc_release({snap: [b'tag1']}) self.assertExists(snap) lzc.lzc_release({snap: [b'tag2']}) self.assertNotExists(snap) def test_release_hold_and_multiple_snap_destruction(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) with cleanup_fd() as fd: lzc.lzc_hold({snap: b'tag'}, fd) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) lzc.lzc_destroy_snaps([snap], defer=True) self.assertExists(snap) lzc.lzc_release({snap: [b'tag']}) self.assertNotExists(snap) def test_release_hold_missing_tag(self): snap = ZFSTest.pool.getRoot().getSnap() lzc.lzc_snapshot([snap]) ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 1) self.assertEqual(ret[0], snap + b'#tag') def test_release_hold_missing_snap(self): snap = ZFSTest.pool.getRoot().getSnap() ret = lzc.lzc_release({snap: [b'tag']}) self.assertEqual(len(ret), 1) self.assertEqual(ret[0], snap) def test_release_hold_missing_snap_2(self): snap = ZFSTest.pool.getRoot().getSnap() ret = lzc.lzc_release({snap: [b'tag', b'another']}) self.assertEqual(len(ret), 1) self.assertEqual(ret[0], snap) def test_release_hold_across_pools(self): snap1 = ZFSTest.pool.getRoot().getSnap() snap2 = ZFSTest.misc_pool.getRoot().getSnap() lzc.lzc_snapshot([snap1]) lzc.lzc_snapshot([snap2]) with cleanup_fd() as fd: lzc.lzc_hold({snap1: b'tag'}, fd) lzc.lzc_hold({snap2: b'tag'}, fd) with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: lzc.lzc_release({snap1: [b'tag'], snap2: [b'tag']}) for e in 
ctx.exception.errors: self.assertIsInstance(e, lzc_exc.PoolsDiffer) # Apparently the tag name is not verified, # only its existence is checked. @unittest.expectedFailure def test_release_hold_too_long_tag(self): snap = ZFSTest.pool.getRoot().getSnap() tag = b't' * 256 lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.HoldReleaseFailure): lzc.lzc_release({snap: [tag]}) # Apparently the full snapshot name is not checked for length # and this snapshot is treated as simply missing. @unittest.expectedFailure def test_release_hold_too_long_snap_name(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(False) with self.assertRaises(lzc_exc.HoldReleaseFailure): lzc.lzc_release({snap: [b'tag']}) def test_release_hold_too_long_snap_name_2(self): snap = ZFSTest.pool.getRoot().getTooLongSnap(True) with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameTooLong) self.assertEqual(e.name, snap) def test_release_hold_invalid_snap_name(self): snap = ZFSTest.pool.getRoot().getSnap() + b'@bad' with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) def test_release_hold_invalid_snap_name_2(self): snap = ZFSTest.pool.getRoot().getFilesystem().getName() with self.assertRaises(lzc_exc.HoldReleaseFailure) as ctx: lzc.lzc_release({snap: [b'tag']}) for e in ctx.exception.errors: self.assertIsInstance(e, lzc_exc.NameInvalid) self.assertEqual(e.name, snap) def test_sync_missing_pool(self): pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_sync(pool) def test_sync_pool_forced(self): pool = ZFSTest.pool.getRoot().getName() lzc.lzc_sync(pool, True) def test_reopen_missing_pool(self): pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_reopen(pool) def test_reopen_pool_no_restart(self): pool = ZFSTest.pool.getRoot().getName() lzc.lzc_reopen(pool, False) def test_channel_program_missing_pool(self): pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_channel_program(pool, b"return {}") def test_channel_program_timeout(self): pool = ZFSTest.pool.getRoot().getName() zcp = b""" for i = 1,10000 do zfs.sync.snapshot('""" + pool + b"""@zcp' .. i) end """ with self.assertRaises(lzc_exc.ZCPTimeout): lzc.lzc_channel_program(pool, zcp, instrlimit=1) def test_channel_program_memory_limit(self): pool = ZFSTest.pool.getRoot().getName() zcp = b""" for i = 1,10000 do zfs.sync.snapshot('""" + pool + b"""@zcp' .. 
i) end """ with self.assertRaises(lzc_exc.ZCPSpaceError): lzc.lzc_channel_program(pool, zcp, memlimit=1) def test_channel_program_invalid_limits(self): pool = ZFSTest.pool.getRoot().getName() zcp = b""" return {} """ with self.assertRaises(lzc_exc.ZCPLimitInvalid): lzc.lzc_channel_program(pool, zcp, instrlimit=0) with self.assertRaises(lzc_exc.ZCPLimitInvalid): lzc.lzc_channel_program(pool, zcp, memlimit=0) def test_channel_program_syntax_error(self): pool = ZFSTest.pool.getRoot().getName() zcp = b""" inv+val:id """ with self.assertRaises(lzc_exc.ZCPSyntaxError) as ctx: lzc.lzc_channel_program(pool, zcp) self.assertTrue(b"syntax error" in ctx.exception.details) def test_channel_program_sync_snapshot(self): pool = ZFSTest.pool.getRoot().getName() snapname = ZFSTest.pool.makeName(b"@zcp") zcp = b""" zfs.sync.snapshot('""" + snapname + b"""') """ lzc.lzc_channel_program(pool, zcp) self.assertExists(snapname) def test_channel_program_runtime_error(self): pool = ZFSTest.pool.getRoot().getName() # failing an assertion raises a runtime error with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: lzc.lzc_channel_program(pool, b"assert(1 == 2)") self.assertTrue( b"assertion failed" in ctx.exception.details) # invoking the error() function raises a runtime error with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: lzc.lzc_channel_program(pool, b"error()") def test_channel_program_nosync_runtime_error(self): pool = ZFSTest.pool.getRoot().getName() zcp = b""" zfs.sync.snapshot('""" + pool + b"""@zcp') """ # lzc_channel_program_nosync() allows only "read-only" operations with self.assertRaises(lzc_exc.ZCPRuntimeError) as ctx: lzc.lzc_channel_program_nosync(pool, zcp) self.assertTrue( b"running functions from the zfs.sync" in ctx.exception.details) def test_change_key_new(self): with encrypted_filesystem() as (fs, _): lzc.lzc_change_key( fs, 'new_key', props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_missing_fs(self): name = b"nonexistent" with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_change_key( name, 'new_key', props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_not_loaded(self): with encrypted_filesystem() as (fs, _): lzc.lzc_unload_key(fs) with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded): lzc.lzc_change_key( fs, 'new_key', props={b"keyformat": lzc.zfs_keyformat.ZFS_KEYFORMAT_RAW}, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_change_key_invalid_property(self): with encrypted_filesystem() as (fs, _): with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_change_key(fs, 'new_key', props={b"invalid": b"prop"}) def test_change_key_invalid_crypt_command(self): with encrypted_filesystem() as (fs, _): with self.assertRaises(lzc_exc.UnknownCryptCommand): lzc.lzc_change_key(fs, 'duplicate_key') def test_load_key(self): with encrypted_filesystem() as (fs, key): lzc.lzc_unload_key(fs) lzc.lzc_load_key(fs, False, key) def test_load_key_invalid(self): with encrypted_filesystem() as (fs, key): lzc.lzc_unload_key(fs) with self.assertRaises(lzc_exc.EncryptionKeyInvalid): lzc.lzc_load_key(fs, False, os.urandom(lzc.WRAPPING_KEY_LEN)) def test_load_key_already_loaded(self): with encrypted_filesystem() as (fs, key): lzc.lzc_unload_key(fs) lzc.lzc_load_key(fs, False, key) with self.assertRaises(lzc_exc.EncryptionKeyAlreadyLoaded): lzc.lzc_load_key(fs, False, key) def test_load_key_missing_fs(self): name = b"nonexistent" with 
self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_load_key(name, False, key=os.urandom(lzc.WRAPPING_KEY_LEN)) def test_unload_key(self): with encrypted_filesystem() as (fs, _): lzc.lzc_unload_key(fs) def test_unload_key_missing_fs(self): name = b"nonexistent" with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_unload_key(name) def test_unload_key_busy(self): with encrypted_filesystem() as (fs, _): with zfs_mount(fs): with self.assertRaises(lzc_exc.DatasetBusy): lzc.lzc_unload_key(fs) def test_unload_key_not_loaded(self): with encrypted_filesystem() as (fs, _): lzc.lzc_unload_key(fs) with self.assertRaises(lzc_exc.EncryptionKeyNotLoaded): lzc.lzc_unload_key(fs) def test_remap_missing_fs(self): name = b"nonexistent" with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_remap(name) def test_remap_invalid_fs(self): ds = ZFSTest.pool.makeName(b"fs1") snap = ds + b"@snap1" lzc.lzc_snapshot([snap]) with self.assertRaises(lzc_exc.NameInvalid): lzc.lzc_remap(snap) def test_remap_too_long_fs_name(self): name = ZFSTest.pool.makeTooLongName() with self.assertRaises(lzc_exc.NameTooLong): lzc.lzc_remap(name) def test_remap(self): name = ZFSTest.pool.makeName(b"fs1") lzc.lzc_remap(name) def test_checkpoint(self): pool = ZFSTest.pool.getRoot().getName() lzc.lzc_pool_checkpoint(pool) def test_checkpoint_missing_pool(self): pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_pool_checkpoint(pool) def test_checkpoint_already_exists(self): pool = ZFSTest.pool.getRoot().getName() lzc.lzc_pool_checkpoint(pool) with self.assertRaises(lzc_exc.CheckpointExists): lzc.lzc_pool_checkpoint(pool) def test_checkpoint_discard(self): pool = ZFSTest.pool.getRoot().getName() lzc.lzc_pool_checkpoint(pool) lzc.lzc_pool_checkpoint_discard(pool) def test_checkpoint_discard_missing_pool(self): pool = b"nonexistent" with self.assertRaises(lzc_exc.PoolNotFound): lzc.lzc_pool_checkpoint_discard(pool) def test_checkpoint_discard_missing_checkpoint(self): pool = ZFSTest.pool.getRoot().getName() with self.assertRaises(lzc_exc.CheckpointNotFound): lzc.lzc_pool_checkpoint_discard(pool) @needs_support(lzc.lzc_list_children) def test_list_children(self): name = ZFSTest.pool.makeName(b"fs1/fs") names = [ZFSTest.pool.makeName(b"fs1/fs/test1"), ZFSTest.pool.makeName(b"fs1/fs/test2"), ZFSTest.pool.makeName(b"fs1/fs/test3"), ] # and one snap to see that it is not listed snap = ZFSTest.pool.makeName(b"fs1/fs@test") for fs in names: lzc.lzc_create(fs) lzc.lzc_snapshot([snap]) children = list(lzc.lzc_list_children(name)) self.assertItemsEqual(children, names) @needs_support(lzc.lzc_list_children) def test_list_children_nonexistent(self): fs = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): list(lzc.lzc_list_children(fs)) @needs_support(lzc.lzc_list_children) def test_list_children_of_snap(self): snap = ZFSTest.pool.makeName(b"@newsnap") lzc.lzc_snapshot([snap]) children = list(lzc.lzc_list_children(snap)) self.assertEqual(children, []) @needs_support(lzc.lzc_list_snaps) def test_list_snaps(self): name = ZFSTest.pool.makeName(b"fs1/fs") names = [ZFSTest.pool.makeName(b"fs1/fs@test1"), ZFSTest.pool.makeName(b"fs1/fs@test2"), ZFSTest.pool.makeName(b"fs1/fs@test3"), ] # and one filesystem to see that it is not listed fs = ZFSTest.pool.makeName(b"fs1/fs/test") for snap in names: lzc.lzc_snapshot([snap]) lzc.lzc_create(fs) snaps = list(lzc.lzc_list_snaps(name)) self.assertItemsEqual(snaps, names) @needs_support(lzc.lzc_list_snaps) def test_list_snaps_nonexistent(self): fs = 
ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): list(lzc.lzc_list_snaps(fs)) @needs_support(lzc.lzc_list_snaps) def test_list_snaps_of_snap(self): snap = ZFSTest.pool.makeName(b"@newsnap") lzc.lzc_snapshot([snap]) snaps = list(lzc.lzc_list_snaps(snap)) self.assertEqual(snaps, []) @needs_support(lzc.lzc_get_props) def test_get_fs_props(self): fs = ZFSTest.pool.makeName(b"new") props = {b"user:foo": b"bar"} lzc.lzc_create(fs, props=props) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset(props, actual_props) @needs_support(lzc.lzc_get_props) def test_get_fs_props_with_child(self): parent = ZFSTest.pool.makeName(b"parent") child = ZFSTest.pool.makeName(b"parent/child") parent_props = {b"user:foo": b"parent"} child_props = {b"user:foo": b"child"} lzc.lzc_create(parent, props=parent_props) lzc.lzc_create(child, props=child_props) actual_parent_props = lzc.lzc_get_props(parent) actual_child_props = lzc.lzc_get_props(child) self.assertDictContainsSubset(parent_props, actual_parent_props) self.assertDictContainsSubset(child_props, actual_child_props) @needs_support(lzc.lzc_get_props) def test_get_snap_props(self): snapname = ZFSTest.pool.makeName(b"@snap") snaps = [snapname] props = {b"user:foo": b"bar"} lzc.lzc_snapshot(snaps, props) actual_props = lzc.lzc_get_props(snapname) self.assertDictContainsSubset(props, actual_props) @needs_support(lzc.lzc_get_props) def test_get_props_nonexistent(self): fs = ZFSTest.pool.makeName(b"nonexistent") with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_get_props(fs) @needs_support(lzc.lzc_get_props) def test_get_mountpoint_none(self): ''' If the *mountpoint* property is set to none, then its value is returned as `bytes` "none". Also, a child filesystem inherits that value. ''' fs = ZFSTest.pool.makeName(b"new") child = ZFSTest.pool.makeName(b"new/child") props = {b"mountpoint": b"none"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset(props, actual_props) # check that mountpoint value is correctly inherited child_props = lzc.lzc_get_props(child) self.assertDictContainsSubset(props, child_props) @needs_support(lzc.lzc_get_props) def test_get_mountpoint_legacy(self): ''' If the *mountpoint* property is set to legacy, then its value is returned as `bytes` "legacy". Also, a child filesystem inherits that value. ''' fs = ZFSTest.pool.makeName(b"new") child = ZFSTest.pool.makeName(b"new/child") props = {b"mountpoint": b"legacy"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset(props, actual_props) # check that mountpoint value is correctly inherited child_props = lzc.lzc_get_props(child) self.assertDictContainsSubset(props, child_props) @needs_support(lzc.lzc_get_props) def test_get_mountpoint_path(self): ''' If the *mountpoint* property is set to a path and the property is not explicitly set on a child filesystem, then its value is that of the parent filesystem with the child's name appended using the '/' separator. 
''' fs = ZFSTest.pool.makeName(b"new") child = ZFSTest.pool.makeName(b"new/child") props = {b"mountpoint": b"/mnt"} lzc.lzc_create(fs, props=props) lzc.lzc_create(child) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset(props, actual_props) # check that mountpoint value is correctly inherited child_props = lzc.lzc_get_props(child) self.assertDictContainsSubset( {b"mountpoint": b"/mnt/child"}, child_props) @needs_support(lzc.lzc_get_props) def test_get_snap_clones(self): fs = ZFSTest.pool.makeName(b"new") snap = ZFSTest.pool.makeName(b"@snap") clone1 = ZFSTest.pool.makeName(b"clone1") clone2 = ZFSTest.pool.makeName(b"clone2") lzc.lzc_create(fs) lzc.lzc_snapshot([snap]) lzc.lzc_clone(clone1, snap) lzc.lzc_clone(clone2, snap) clones_prop = lzc.lzc_get_props(snap)["clones"] self.assertItemsEqual(clones_prop, [clone1, clone2]) @needs_support(lzc.lzc_rename) def test_rename(self): src = ZFSTest.pool.makeName(b"source") tgt = ZFSTest.pool.makeName(b"target") lzc.lzc_create(src) lzc.lzc_rename(src, tgt) self.assertNotExists(src) self.assertExists(tgt) @needs_support(lzc.lzc_rename) def test_rename_nonexistent(self): src = ZFSTest.pool.makeName(b"source") tgt = ZFSTest.pool.makeName(b"target") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_rename(src, tgt) @needs_support(lzc.lzc_rename) def test_rename_existing_target(self): src = ZFSTest.pool.makeName(b"source") tgt = ZFSTest.pool.makeName(b"target") lzc.lzc_create(src) lzc.lzc_create(tgt) with self.assertRaises(lzc_exc.FilesystemExists): lzc.lzc_rename(src, tgt) @needs_support(lzc.lzc_rename) def test_rename_nonexistent_target_parent(self): src = ZFSTest.pool.makeName(b"source") tgt = ZFSTest.pool.makeName(b"parent/target") lzc.lzc_create(src) with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_rename(src, tgt) + @needs_support(lzc.lzc_rename) + def test_rename_parent_is_zvol(self): + src = ZFSTest.pool.makeName(b"source") + zvol = ZFSTest.pool.makeName(b"parent") + tgt = zvol + b"/target" + props = {b"volsize": 1024 * 1024} + + lzc.lzc_create(src) + lzc.lzc_create(zvol, ds_type='zvol', props=props) + with self.assertRaises(lzc_exc.WrongParent): + lzc.lzc_rename(src, tgt) + @needs_support(lzc.lzc_destroy) def test_destroy(self): fs = ZFSTest.pool.makeName(b"test-fs") lzc.lzc_create(fs) lzc.lzc_destroy(fs) self.assertNotExists(fs) @needs_support(lzc.lzc_destroy) def test_destroy_nonexistent(self): fs = ZFSTest.pool.makeName(b"test-fs") with self.assertRaises(lzc_exc.FilesystemNotFound): lzc.lzc_destroy(fs) @needs_support(lzc.lzc_inherit_prop) def test_inherit_prop(self): parent = ZFSTest.pool.makeName(b"parent") child = ZFSTest.pool.makeName(b"parent/child") the_prop = b"user:foo" parent_props = {the_prop: b"parent"} child_props = {the_prop: b"child"} lzc.lzc_create(parent, props=parent_props) lzc.lzc_create(child, props=child_props) lzc.lzc_inherit_prop(child, the_prop) actual_props = lzc.lzc_get_props(child) self.assertDictContainsSubset(parent_props, actual_props) @needs_support(lzc.lzc_inherit_prop) def test_inherit_missing_prop(self): parent = ZFSTest.pool.makeName(b"parent") child = ZFSTest.pool.makeName(b"parent/child") the_prop = b"user:foo" child_props = {the_prop: b"child"} lzc.lzc_create(parent) lzc.lzc_create(child, props=child_props) lzc.lzc_inherit_prop(child, the_prop) actual_props = lzc.lzc_get_props(child) self.assertNotIn(the_prop, actual_props) @needs_support(lzc.lzc_inherit_prop) def test_inherit_readonly_prop(self): parent = ZFSTest.pool.makeName(b"parent") child =
ZFSTest.pool.makeName(b"parent/child") the_prop = b"createtxg" lzc.lzc_create(parent) lzc.lzc_create(child) with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_inherit_prop(child, the_prop) @needs_support(lzc.lzc_inherit_prop) def test_inherit_unknown_prop(self): parent = ZFSTest.pool.makeName(b"parent") child = ZFSTest.pool.makeName(b"parent/child") the_prop = b"nosuchprop" lzc.lzc_create(parent) lzc.lzc_create(child) with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_inherit_prop(child, the_prop) @needs_support(lzc.lzc_inherit_prop) def test_inherit_prop_on_snap(self): fs = ZFSTest.pool.makeName(b"new") snapname = ZFSTest.pool.makeName(b"new@snap") prop = b"user:foo" fs_val = b"fs" snap_val = b"snap" lzc.lzc_create(fs, props={prop: fs_val}) lzc.lzc_snapshot([snapname], props={prop: snap_val}) actual_props = lzc.lzc_get_props(snapname) self.assertDictContainsSubset({prop: snap_val}, actual_props) lzc.lzc_inherit_prop(snapname, prop) actual_props = lzc.lzc_get_props(snapname) self.assertDictContainsSubset({prop: fs_val}, actual_props) @needs_support(lzc.lzc_set_prop) def test_set_fs_prop(self): fs = ZFSTest.pool.makeName(b"new") prop = b"user:foo" val = b"bar" lzc.lzc_create(fs) lzc.lzc_set_prop(fs, prop, val) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset({prop: val}, actual_props) @needs_support(lzc.lzc_set_prop) def test_set_snap_prop(self): snapname = ZFSTest.pool.makeName(b"@snap") prop = b"user:foo" val = b"bar" lzc.lzc_snapshot([snapname]) lzc.lzc_set_prop(snapname, prop, val) actual_props = lzc.lzc_get_props(snapname) self.assertDictContainsSubset({prop: val}, actual_props) @needs_support(lzc.lzc_set_prop) def test_set_prop_nonexistent(self): fs = ZFSTest.pool.makeName(b"nonexistent") prop = b"user:foo" val = b"bar" with self.assertRaises(lzc_exc.DatasetNotFound): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_sys_prop(self): fs = ZFSTest.pool.makeName(b"new") prop = b"recordsize" val = 4096 lzc.lzc_create(fs) lzc.lzc_set_prop(fs, prop, val) actual_props = lzc.lzc_get_props(fs) self.assertDictContainsSubset({prop: val}, actual_props) @needs_support(lzc.lzc_set_prop) def test_set_invalid_prop(self): fs = ZFSTest.pool.makeName(b"new") prop = b"nosuchprop" val = 0 lzc.lzc_create(fs) with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_invalid_value_prop(self): fs = ZFSTest.pool.makeName(b"new") prop = b"atime" val = 100 lzc.lzc_create(fs) with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_invalid_value_prop_2(self): fs = ZFSTest.pool.makeName(b"new") prop = b"readonly" val = 100 lzc.lzc_create(fs) with self.assertRaises(lzc_exc.PropertyInvalid): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_prop_too_small_quota(self): fs = ZFSTest.pool.makeName(b"new") prop = b"refquota" val = 1 lzc.lzc_create(fs) with self.assertRaises(lzc_exc.NoSpace): lzc.lzc_set_prop(fs, prop, val) @needs_support(lzc.lzc_set_prop) def test_set_readonly_prop(self): fs = ZFSTest.pool.makeName(b"new") prop = b"creation" val = 0 lzc.lzc_create(fs) lzc.lzc_set_prop(fs, prop, val) actual_props = lzc.lzc_get_props(fs) # the change is silently ignored self.assertTrue(actual_props[prop] != val) class _TempPool(object): SNAPSHOTS = [b'snap', b'snap1', b'snap2'] BOOKMARKS = [b'bmark', b'bmark1', b'bmark2'] _cachefile_suffix = ".cachefile" # XXX Whether to do a sloppy but much faster 
cleanup # or a proper but slower one. _recreate_pools = True def __init__(self, size=128 * 1024 * 1024, readonly=False, filesystems=[]): self._filesystems = filesystems self._readonly = readonly if sys.version_info < (3, 0): self._pool_name = b'pool.' + bytes(uuid.uuid4()) else: self._pool_name = b'pool.' + bytes(str(uuid.uuid4()), encoding='utf-8') self._root = _Filesystem(self._pool_name) (fd, self._pool_file_path) = tempfile.mkstemp( suffix='.zpool', prefix='tmp-') if readonly: cachefile = self._pool_file_path + _TempPool._cachefile_suffix else: cachefile = 'none' self._zpool_create = [ 'zpool', 'create', '-o', 'cachefile=' + cachefile, '-O', 'mountpoint=legacy', self._pool_name, self._pool_file_path] try: os.ftruncate(fd, size) os.close(fd) subprocess.check_output( self._zpool_create, stderr=subprocess.STDOUT) for fs in filesystems: lzc.lzc_create(self.makeName(fs)) self._bmarks_supported = self.isPoolFeatureEnabled('bookmarks') if readonly: # To make a pool read-only it must be exported and re-imported # with the readonly option. # The most deterministic way to re-import the pool is by using # a cache file. # But the cache file has to be stashed away before the pool is # exported, because otherwise the pool is removed from the # cache. shutil.copyfile(cachefile, cachefile + '.tmp') subprocess.check_output( ['zpool', 'export', '-f', self._pool_name], stderr=subprocess.STDOUT) os.rename(cachefile + '.tmp', cachefile) subprocess.check_output( ['zpool', 'import', '-f', '-N', '-c', cachefile, '-o', 'readonly=on', self._pool_name], stderr=subprocess.STDOUT) os.remove(cachefile) except subprocess.CalledProcessError as e: self.cleanUp() if b'permission denied' in e.output: raise unittest.SkipTest( 'insufficient privileges to run libzfs_core tests') print('command failed: ', e.output) raise except Exception: self.cleanUp() raise def reset(self): if self._readonly: return if not self.__class__._recreate_pools: snaps = [] for fs in [b''] + self._filesystems: for snap in self.__class__.SNAPSHOTS: snaps.append(self.makeName(fs + b'@' + snap)) self.getRoot().visitSnaps(lambda snap: snaps.append(snap)) lzc.lzc_destroy_snaps(snaps, defer=False) if self._bmarks_supported: bmarks = [] for fs in [b''] + self._filesystems: for bmark in self.__class__.BOOKMARKS: bmarks.append(self.makeName(fs + b'#' + bmark)) self.getRoot().visitBookmarks( lambda bmark: bmarks.append(bmark)) lzc.lzc_destroy_bookmarks(bmarks) self.getRoot().reset() return # On the Buildbot builders this may fail with "pool is busy" # Retry 5 times before raising an error retry = 0 while True: try: subprocess.check_output( ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT) subprocess.check_output( self._zpool_create, stderr=subprocess.STDOUT) break except subprocess.CalledProcessError as e: if b'pool is busy' in e.output and retry < 5: retry += 1 time.sleep(1) continue else: print('command failed: ', e.output) raise for fs in self._filesystems: lzc.lzc_create(self.makeName(fs)) self.getRoot().reset() def cleanUp(self): try: subprocess.check_output( ['zpool', 'destroy', '-f', self._pool_name], stderr=subprocess.STDOUT) except Exception: pass try: os.remove(self._pool_file_path) except Exception: pass try: os.remove(self._pool_file_path + _TempPool._cachefile_suffix) except Exception: pass try: os.remove( self._pool_file_path + _TempPool._cachefile_suffix + '.tmp') except Exception: pass def makeName(self, relative=None): if not relative: return self._pool_name if relative.startswith((b'@', b'#')): return self._pool_name + relative
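# otherwise 'relative' names a child dataset, so join it with a '/' separator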
return self._pool_name + b'/' + relative def makeTooLongName(self, prefix=None): if not prefix: prefix = b'x' prefix = self.makeName(prefix) pad_len = lzc.MAXNAMELEN + 1 - len(prefix) if pad_len > 0: return prefix + b'x' * pad_len else: return prefix def makeTooLongComponent(self, prefix=None): padding = b'x' * (lzc.MAXNAMELEN + 1) if not prefix: prefix = padding else: prefix = prefix + padding return self.makeName(prefix) def getRoot(self): return self._root def getFilesystem(self, fsname): return _Filesystem(self._pool_name + b'/' + fsname) def isPoolFeatureAvailable(self, feature): output = subprocess.check_output( ['zpool', 'get', '-H', 'feature@' + feature, self._pool_name]) output = output.strip() return output != b'' def isPoolFeatureEnabled(self, feature): output = subprocess.check_output( ['zpool', 'get', '-H', 'feature@' + feature, self._pool_name]) output = output.split()[2] return output in [b'active', b'enabled'] class _Filesystem(object): def __init__(self, name): self._name = name self.reset() def getName(self): return self._name def reset(self): self._children = [] self._fs_id = 0 self._snap_id = 0 self._bmark_id = 0 def getFilesystem(self): self._fs_id += 1 fsname = self._name + b'/fs' + str(self._fs_id).encode() fs = _Filesystem(fsname) self._children.append(fs) return fs def getProperty(self, propname, received=False): if received: output = subprocess.check_output( ['zfs', 'get', '-pH', '-o', 'received', propname, self._name]) else: output = subprocess.check_output( ['zfs', 'get', '-pH', '-o', 'value', propname, self._name]) return output.strip() def _makeSnapName(self, i): return self._name + b'@snap' + str(i).encode() def getSnap(self): self._snap_id += 1 return self._makeSnapName(self._snap_id) def _makeBookmarkName(self, i): return self._name + b'#bmark' + str(i).encode() def getBookmark(self): self._bmark_id += 1 return self._makeBookmarkName(self._bmark_id) def _makeTooLongName(self, too_long_component): if too_long_component: return b'x' * (lzc.MAXNAMELEN + 1) # Note that another character is used for one of '/', '@', '#'. comp_len = lzc.MAXNAMELEN - len(self._name) if comp_len > 0: return b'x' * comp_len else: return b'x' def getTooLongFilesystemName(self, too_long_component): return self._name + b'/' + self._makeTooLongName(too_long_component) def getTooLongSnap(self, too_long_component): return self._name + b'@' + self._makeTooLongName(too_long_component) def getTooLongBookmark(self, too_long_component): return self._name + b'#' + self._makeTooLongName(too_long_component) def _visitFilesystems(self, visitor): for child in self._children: child._visitFilesystems(visitor) visitor(self) def visitFilesystems(self, visitor): def _fsVisitor(fs): visitor(fs._name) self._visitFilesystems(_fsVisitor) def visitSnaps(self, visitor): def _snapVisitor(fs): for i in range(1, fs._snap_id + 1): visitor(fs._makeSnapName(i)) self._visitFilesystems(_snapVisitor) def visitBookmarks(self, visitor): def _bmarkVisitor(fs): for i in range(1, fs._bmark_id + 1): visitor(fs._makeBookmarkName(i)) self._visitFilesystems(_bmarkVisitor) # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/include/libzfs.h b/include/libzfs.h index 85b0bc0ddb77..72d956b41f32 100644 --- a/include/libzfs.h +++ b/include/libzfs.h @@ -1,822 +1,823 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License.
* * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright (c) 2012, Joyent, Inc. All rights reserved. * Copyright (c) 2013 Steven Hartland. All rights reserved. * Copyright (c) 2016, Intel Corporation. * Copyright 2016 Nexenta Systems, Inc. * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. * Copyright (c) 2018 Datto Inc. */ #ifndef _LIBZFS_H #define _LIBZFS_H #include #include #include #include #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* * Miscellaneous ZFS constants */ #define ZFS_MAXPROPLEN MAXPATHLEN #define ZPOOL_MAXPROPLEN MAXPATHLEN /* * libzfs errors */ typedef enum zfs_error { EZFS_SUCCESS = 0, /* no error -- success */ EZFS_NOMEM = 2000, /* out of memory */ EZFS_BADPROP, /* invalid property value */ EZFS_PROPREADONLY, /* cannot set readonly property */ EZFS_PROPTYPE, /* property does not apply to dataset type */ EZFS_PROPNONINHERIT, /* property is not inheritable */ EZFS_PROPSPACE, /* bad quota or reservation */ EZFS_BADTYPE, /* dataset is not of appropriate type */ EZFS_BUSY, /* pool or dataset is busy */ EZFS_EXISTS, /* pool or dataset already exists */ EZFS_NOENT, /* no such pool or dataset */ EZFS_BADSTREAM, /* bad backup stream */ EZFS_DSREADONLY, /* dataset is readonly */ EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */ EZFS_INVALIDNAME, /* invalid dataset name */ EZFS_BADRESTORE, /* unable to restore to destination */ EZFS_BADBACKUP, /* backup failed */ EZFS_BADTARGET, /* bad attach/detach/replace target */ EZFS_NODEVICE, /* no such device in pool */ EZFS_BADDEV, /* invalid device to add */ EZFS_NOREPLICAS, /* no valid replicas */ EZFS_RESILVERING, /* currently resilvering */ EZFS_BADVERSION, /* unsupported version */ EZFS_POOLUNAVAIL, /* pool is currently unavailable */ EZFS_DEVOVERFLOW, /* too many devices in one vdev */ EZFS_BADPATH, /* must be an absolute path */ EZFS_CROSSTARGET, /* rename or clone across pool or dataset */ EZFS_ZONED, /* used improperly in local zone */ EZFS_MOUNTFAILED, /* failed to mount dataset */ EZFS_UMOUNTFAILED, /* failed to unmount dataset */ EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */ EZFS_SHARENFSFAILED, /* share(1M) failed */ EZFS_PERM, /* permission denied */ EZFS_NOSPC, /* out of space */ EZFS_FAULT, /* bad address */ EZFS_IO, /* I/O error */ EZFS_INTR, /* signal received */ EZFS_ISSPARE, /* device is a hot spare */ EZFS_INVALCONFIG, /* invalid vdev configuration */ EZFS_RECURSIVE, /* recursive dependency */ EZFS_NOHISTORY, /* no history object */ EZFS_POOLPROPS, /* couldn't retrieve pool props */ EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */ EZFS_POOL_INVALARG, /* invalid argument for this pool operation */ EZFS_NAMETOOLONG, /* dataset name is too long */ EZFS_OPENFAILED, /* open of device failed */ EZFS_NOCAP, /* couldn't get capacity */ EZFS_LABELFAILED, /* write of label failed 
*/ EZFS_BADWHO, /* invalid permission who */ EZFS_BADPERM, /* invalid permission */ EZFS_BADPERMSET, /* invalid permission set name */ EZFS_NODELEGATION, /* delegated administration is disabled */ EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */ EZFS_SHARESMBFAILED, /* failed to share over smb */ EZFS_BADCACHE, /* bad cache file */ EZFS_ISL2CACHE, /* device is for the level 2 ARC */ EZFS_VDEVNOTSUP, /* unsupported vdev type */ EZFS_NOTSUP, /* ops not supported on this dataset */ EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */ EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */ EZFS_REFTAG_RELE, /* snapshot release: tag not found */ EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */ EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */ EZFS_PIPEFAILED, /* pipe create failed */ EZFS_THREADCREATEFAILED, /* thread create failed */ EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */ EZFS_SCRUBBING, /* currently scrubbing */ EZFS_NO_SCRUB, /* no active scrub */ EZFS_DIFF, /* general failure of zfs diff */ EZFS_DIFFDATA, /* bad zfs diff data */ EZFS_POOLREADONLY, /* pool is in read-only mode */ EZFS_SCRUB_PAUSED, /* scrub currently paused */ EZFS_ACTIVE_POOL, /* pool is imported on a different system */ EZFS_CRYPTOFAILED, /* failed to setup encryption */ EZFS_NO_PENDING, /* cannot cancel, no operation is pending */ EZFS_CHECKPOINT_EXISTS, /* checkpoint exists */ EZFS_DISCARDING_CHECKPOINT, /* currently discarding a checkpoint */ EZFS_NO_CHECKPOINT, /* pool has no checkpoint */ EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */ EZFS_VDEV_TOO_BIG, /* a device is too big to be used */ EZFS_IOC_NOTSUPPORTED, /* operation not supported by zfs module */ EZFS_TOOMANY, /* argument list too long */ EZFS_INITIALIZING, /* currently initializing */ EZFS_NO_INITIALIZE, /* no active initialize */ + EZFS_WRONG_PARENT, /* invalid parent dataset (e.g. a ZVOL) */ EZFS_UNKNOWN } zfs_error_t; /* * The following data structures are all part * of the zfs_allow_t data structure which is * used for printing 'allow' permissions. * It is a linked list of zfs_allow_t's which * then contain AVL trees for user/group/sets/... * and each one of the entries in those trees has * AVL trees for the permissions they belong to and * whether they are local, descendent or local+descendent * permissions. The AVL trees are used primarily for * sorting purposes, but also so that we can quickly find * a given user and/or permission.
*/ typedef struct zfs_perm_node { avl_node_t z_node; char z_pname[MAXPATHLEN]; } zfs_perm_node_t; typedef struct zfs_allow_node { avl_node_t z_node; char z_key[MAXPATHLEN]; /* name, such as joe */ avl_tree_t z_localdescend; /* local+descendent perms */ avl_tree_t z_local; /* local permissions */ avl_tree_t z_descend; /* descendent permissions */ } zfs_allow_node_t; typedef struct zfs_allow { struct zfs_allow *z_next; char z_setpoint[MAXPATHLEN]; avl_tree_t z_sets; avl_tree_t z_crperms; avl_tree_t z_user; avl_tree_t z_group; avl_tree_t z_everyone; } zfs_allow_t; /* * Basic handle types */ typedef struct zfs_handle zfs_handle_t; typedef struct zpool_handle zpool_handle_t; typedef struct libzfs_handle libzfs_handle_t; /* * Library initialization */ extern libzfs_handle_t *libzfs_init(void); extern void libzfs_fini(libzfs_handle_t *); extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *); extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *); extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t); extern void zfs_save_arguments(int argc, char **, char *, int); extern int zpool_log_history(libzfs_handle_t *, const char *); extern int libzfs_errno(libzfs_handle_t *); extern const char *libzfs_error_init(int); extern const char *libzfs_error_action(libzfs_handle_t *); extern const char *libzfs_error_description(libzfs_handle_t *); extern int zfs_standard_error(libzfs_handle_t *, int, const char *); extern void libzfs_mnttab_init(libzfs_handle_t *); extern void libzfs_mnttab_fini(libzfs_handle_t *); extern void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t); extern int libzfs_mnttab_find(libzfs_handle_t *, const char *, struct mnttab *); extern void libzfs_mnttab_add(libzfs_handle_t *, const char *, const char *, const char *); extern void libzfs_mnttab_remove(libzfs_handle_t *, const char *); /* * Basic handle functions */ extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *); extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *); extern void zpool_close(zpool_handle_t *); extern const char *zpool_get_name(zpool_handle_t *); extern int zpool_get_state(zpool_handle_t *); extern const char *zpool_state_to_name(vdev_state_t, vdev_aux_t); extern const char *zpool_pool_state_to_name(pool_state_t); extern void zpool_free_handles(libzfs_handle_t *); /* * Iterate over all active pools in the system. 
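 *
 * Illustrative sketch (the callback and variable names here are
 * hypothetical, and "g_zfs" is assumed to be a handle obtained from
 * libzfs_init()): zpool_iter() invokes the callback once per imported
 * pool, and a nonzero return value from the callback aborts the
 * iteration and is propagated back to the caller.
 *
 *	static int
 *	count_pools_cb(zpool_handle_t *zhp, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *
 *	int npools = 0;
 *	(void) zpool_iter(g_zfs, count_pools_cb, &npools);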
*/ typedef int (*zpool_iter_f)(zpool_handle_t *, void *); extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *); extern boolean_t zpool_skip_pool(const char *); /* * Functions to create and destroy pools */ extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *, nvlist_t *, nvlist_t *); extern int zpool_destroy(zpool_handle_t *, const char *); extern int zpool_add(zpool_handle_t *, nvlist_t *); typedef struct splitflags { /* do not split, but return the config that would be split off */ int dryrun : 1; /* after splitting, import the pool */ int import : 1; int name_flags; } splitflags_t; /* * Functions to manipulate pool and vdev state */ extern int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t); extern int zpool_initialize(zpool_handle_t *, pool_initialize_func_t, nvlist_t *); extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *); extern int zpool_reguid(zpool_handle_t *); extern int zpool_reopen_one(zpool_handle_t *, void *); extern int zpool_sync_one(zpool_handle_t *, void *); extern int zpool_vdev_online(zpool_handle_t *, const char *, int, vdev_state_t *); extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t); extern int zpool_vdev_attach(zpool_handle_t *, const char *, const char *, nvlist_t *, int); extern int zpool_vdev_detach(zpool_handle_t *, const char *); extern int zpool_vdev_remove(zpool_handle_t *, const char *); extern int zpool_vdev_remove_cancel(zpool_handle_t *); extern int zpool_vdev_indirect_size(zpool_handle_t *, const char *, uint64_t *); extern int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **, nvlist_t *, splitflags_t); extern int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t); extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t); extern int zpool_vdev_clear(zpool_handle_t *, uint64_t); extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *, boolean_t *, boolean_t *); extern nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *, boolean_t *, boolean_t *, boolean_t *); extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *); extern uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path); const char *zpool_get_state_str(zpool_handle_t *); /* * Functions to manage pool properties */ extern int zpool_set_prop(zpool_handle_t *, const char *, const char *); extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *, size_t proplen, zprop_source_t *, boolean_t literal); extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t, zprop_source_t *); extern const char *zpool_prop_to_name(zpool_prop_t); extern const char *zpool_prop_values(zpool_prop_t); /* * Pool health statistics. */ typedef enum { /* * The following correspond to faults as defined in the (fault.fs.zfs.*) * event namespace. Each is associated with a corresponding message ID. * This must be kept in sync with the zfs_msgid_table in * lib/libzfs/libzfs_status.c. 
*/ ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */ ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */ ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */ ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */ ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */ ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */ ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */ ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */ ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */ ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */ ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */ ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */ ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */ ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */ ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */ ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */ ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */ ZPOOL_STATUS_ERRATA, /* informational errata available */ /* * If the pool has unsupported features but can still be opened in * read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the * pool has unsupported features but cannot be opened at all, its * status is ZPOOL_STATUS_UNSUP_FEAT_READ. */ ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */ ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */ /* * These faults have no corresponding message ID. At the time we are * checking the status, the original reason for the FMA fault (I/O or * checksum errors) has been lost. */ ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */ ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */ /* * The following are not faults per se, but still an error possibly * requiring administrative attention. There is no corresponding * message ID. */ ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */ ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */ ZPOOL_STATUS_RESILVERING, /* device being resilvered */ ZPOOL_STATUS_OFFLINE_DEV, /* device offline */ ZPOOL_STATUS_REMOVED_DEV, /* removed device */ /* * Finally, the following indicates a healthy pool. */ ZPOOL_STATUS_OK } zpool_status_t; extern zpool_status_t zpool_get_status(zpool_handle_t *, char **, zpool_errata_t *); extern zpool_status_t zpool_import_status(nvlist_t *, char **, zpool_errata_t *); /* * Statistics and configuration functions. 
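 *
 * A minimal illustrative sketch, assuming "zhp" is an open pool handle:
 * zpool_refresh_stats() refreshes the cached configuration, and passing
 * NULL as the second argument of zpool_get_config() simply skips
 * returning the previous configuration.
 *
 *	boolean_t missing;
 *	(void) zpool_refresh_stats(zhp, &missing);
 *	nvlist_t *config = zpool_get_config(zhp, NULL);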
*/ extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **); extern nvlist_t *zpool_get_features(zpool_handle_t *); extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *); extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **); /* * Import and export functions */ extern int zpool_export(zpool_handle_t *, boolean_t, const char *); extern int zpool_export_force(zpool_handle_t *, const char *); extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *, char *altroot); extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *, nvlist_t *, int); extern void zpool_print_unsup_feat(nvlist_t *config); /* * Miscellaneous pool functions */ struct zfs_cmd; extern const char *zfs_history_event_names[]; typedef enum { VDEV_NAME_PATH = 1 << 0, VDEV_NAME_GUID = 1 << 1, VDEV_NAME_FOLLOW_LINKS = 1 << 2, VDEV_NAME_TYPE_ID = 1 << 3, } vdev_name_t; extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *, int name_flags); extern int zpool_upgrade(zpool_handle_t *, uint64_t); extern int zpool_get_history(zpool_handle_t *, nvlist_t **); extern int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned, int); extern int zpool_events_clear(libzfs_handle_t *, int *); extern int zpool_events_seek(libzfs_handle_t *, uint64_t, int); extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *, size_t len); extern int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *); extern int zpool_get_physpath(zpool_handle_t *, char *, size_t); extern void zpool_explain_recover(libzfs_handle_t *, const char *, int, nvlist_t *); extern int zpool_checkpoint(zpool_handle_t *); extern int zpool_discard_checkpoint(zpool_handle_t *); /* * Basic handle manipulations. These functions do not create or destroy the * underlying datasets, only the references to them. */ extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int); extern zfs_handle_t *zfs_handle_dup(zfs_handle_t *); extern void zfs_close(zfs_handle_t *); extern zfs_type_t zfs_get_type(const zfs_handle_t *); extern const char *zfs_get_name(const zfs_handle_t *); extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *); extern const char *zfs_get_pool_name(const zfs_handle_t *); /* * Property management functions. Some functions are shared with the kernel, * and are found in sys/fs/zfs.h. 
*/ /* * zfs dataset property management */ extern const char *zfs_prop_default_string(zfs_prop_t); extern uint64_t zfs_prop_default_numeric(zfs_prop_t); extern const char *zfs_prop_column_name(zfs_prop_t); extern boolean_t zfs_prop_align_right(zfs_prop_t); extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t, nvlist_t *, uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t, const char *); extern const char *zfs_prop_to_name(zfs_prop_t); extern int zfs_prop_set(zfs_handle_t *, const char *, const char *); extern int zfs_prop_set_list(zfs_handle_t *, nvlist_t *); extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t, zprop_source_t *, char *, size_t, boolean_t); extern int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t, boolean_t); extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *, zprop_source_t *, char *, size_t); extern int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue); extern int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal); extern int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue); extern int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal); extern int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname, char *buf, size_t len); extern uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, char **); extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t); extern int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t); extern const char *zfs_prop_values(zfs_prop_t); extern int zfs_prop_is_string(zfs_prop_t prop); extern nvlist_t *zfs_get_all_props(zfs_handle_t *); extern nvlist_t *zfs_get_user_props(zfs_handle_t *); extern nvlist_t *zfs_get_recvd_props(zfs_handle_t *); extern nvlist_t *zfs_get_clones_nvl(zfs_handle_t *); /* * zfs encryption management */ extern int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *, char *); extern int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *, nvlist_t *, boolean_t stdin_available, uint8_t **, uint_t *); extern int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *, nvlist_t *); extern int zfs_crypto_attempt_load_keys(libzfs_handle_t *, char *); extern int zfs_crypto_load_key(zfs_handle_t *, boolean_t, char *); extern int zfs_crypto_unload_key(zfs_handle_t *); extern int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t); typedef struct zprop_list { int pl_prop; char *pl_user_prop; struct zprop_list *pl_next; boolean_t pl_all; size_t pl_width; size_t pl_recvd_width; boolean_t pl_fixed; } zprop_list_t; extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t, boolean_t); extern void zfs_prune_proplist(zfs_handle_t *, uint8_t *); #define ZFS_MOUNTPOINT_NONE "none" #define ZFS_MOUNTPOINT_LEGACY "legacy" #define ZFS_FEATURE_DISABLED "disabled" #define ZFS_FEATURE_ENABLED "enabled" #define ZFS_FEATURE_ACTIVE "active" #define ZFS_UNSUPPORTED_INACTIVE "inactive" #define ZFS_UNSUPPORTED_READONLY "readonly" /* * zpool property management */ extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **); extern int zpool_prop_get_feature(zpool_handle_t *, const char *, char *, size_t); extern const char *zpool_prop_default_string(zpool_prop_t); extern uint64_t zpool_prop_default_numeric(zpool_prop_t); extern const char *zpool_prop_column_name(zpool_prop_t); extern boolean_t zpool_prop_align_right(zpool_prop_t); /* * 
Functions shared by zfs and zpool property management. */ extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered, zfs_type_t type); extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **, zfs_type_t); extern void zprop_free_list(zprop_list_t *); #define ZFS_GET_NCOLS 5 typedef enum { GET_COL_NONE, GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, GET_COL_RECVD, GET_COL_SOURCE } zfs_get_column_t; /* * Functions for printing zfs or zpool properties */ typedef struct zprop_get_cbdata { int cb_sources; zfs_get_column_t cb_columns[ZFS_GET_NCOLS]; int cb_colwidths[ZFS_GET_NCOLS + 1]; boolean_t cb_scripted; boolean_t cb_literal; boolean_t cb_first; zprop_list_t *cb_proplist; zfs_type_t cb_type; } zprop_get_cbdata_t; void zprop_print_one_property(const char *, zprop_get_cbdata_t *, const char *, const char *, zprop_source_t, const char *, const char *); /* * Iterator functions. */ typedef int (*zfs_iter_f)(zfs_handle_t *, void *); extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *); extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *); extern int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f, void *); extern int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *); typedef struct get_all_cb { zfs_handle_t **cb_handles; size_t cb_alloc; size_t cb_used; } get_all_cb_t; void zfs_foreach_mountpoint(libzfs_handle_t *, zfs_handle_t **, size_t, zfs_iter_f, void *, boolean_t); void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *); /* * Functions to create and destroy datasets. */ extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t, nvlist_t *); extern int zfs_create_ancestors(libzfs_handle_t *, const char *); extern int zfs_destroy(zfs_handle_t *, boolean_t); extern int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t); extern int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t); extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *); extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *); extern int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props); extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t); extern int zfs_rename(zfs_handle_t *, const char *, boolean_t, boolean_t); typedef struct sendflags { /* print informational messages (ie, -v was specified) */ boolean_t verbose; /* recursive send (ie, -R) */ boolean_t replicate; /* for incrementals, do all intermediate snapshots */ boolean_t doall; /* if dataset is a clone, do incremental from its origin */ boolean_t fromorigin; /* do deduplication */ boolean_t dedup; /* send properties (ie, -p) */ boolean_t props; /* do not send (no-op, ie. -n) */ boolean_t dryrun; /* parsable verbose output (ie. -P) */ boolean_t parsable; /* show progress (ie. -v) */ boolean_t progress; /* large blocks (>128K) are permitted */ boolean_t largeblock; /* WRITE_EMBEDDED records of type DATA are permitted */ boolean_t embed_data; /* compressed WRITE records are permitted */ boolean_t compress; /* raw encrypted records are permitted */ boolean_t raw; /* only send received properties (ie. 
-b) */ boolean_t backup; } sendflags_t; typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *); extern int zfs_send(zfs_handle_t *, const char *, const char *, sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **); extern int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t flags); extern int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd, const char *); extern nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token); extern int zfs_promote(zfs_handle_t *); extern int zfs_hold(zfs_handle_t *, const char *, const char *, boolean_t, int); extern int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *); extern int zfs_release(zfs_handle_t *, const char *, const char *, boolean_t); extern int zfs_get_holds(zfs_handle_t *, nvlist_t **); extern uint64_t zvol_volsize_to_reservation(uint64_t, nvlist_t *); typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain, uid_t rid, uint64_t space); extern int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t, zfs_userspace_cb_t, void *); extern int zfs_get_fsacl(zfs_handle_t *, nvlist_t **); extern int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *); typedef struct recvflags { /* print informational messages (ie, -v was specified) */ boolean_t verbose; /* the destination is a prefix, not the exact fs (ie, -d) */ boolean_t isprefix; /* * Only the tail of the sent snapshot path is appended to the * destination to determine the received snapshot name (ie, -e). */ boolean_t istail; /* do not actually do the recv, just check if it would work (ie, -n) */ boolean_t dryrun; /* rollback/destroy filesystems as necessary (eg, -F) */ boolean_t force; /* set "canmount=off" on all modified filesystems */ boolean_t canmountoff; /* * Mark the file systems as "resumable" and do not destroy them if the * receive is interrupted */ boolean_t resumable; /* byteswap flag is used internally; callers need not specify */ boolean_t byteswap; /* do not mount file systems as they are extracted (private) */ boolean_t nomount; } recvflags_t; extern int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *, recvflags_t *, int, avl_tree_t *); typedef enum diff_flags { ZFS_DIFF_PARSEABLE = 0x1, ZFS_DIFF_TIMESTAMP = 0x2, ZFS_DIFF_CLASSIFY = 0x4 } diff_flags_t; extern int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *, int); /* * Miscellaneous functions. */ extern const char *zfs_type_to_name(zfs_type_t); extern void zfs_refresh_properties(zfs_handle_t *); extern int zfs_name_valid(const char *, zfs_type_t); extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t); extern int zfs_parent_name(zfs_handle_t *, char *, size_t); extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *, zfs_type_t); extern int zfs_spa_version(zfs_handle_t *, int *); extern boolean_t zfs_bookmark_exists(const char *path); /* * Mount support functions. */ extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **); extern boolean_t zfs_is_mounted(zfs_handle_t *, char **); extern int zfs_mount(zfs_handle_t *, const char *, int); extern int zfs_unmount(zfs_handle_t *, const char *, int); extern int zfs_unmountall(zfs_handle_t *, int); /* * Share support functions. */ extern boolean_t zfs_is_shared(zfs_handle_t *); extern int zfs_share(zfs_handle_t *); extern int zfs_unshare(zfs_handle_t *); /* * Protocol-specific share support functions. 
*/ extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **); extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **); extern int zfs_share_nfs(zfs_handle_t *); extern int zfs_share_smb(zfs_handle_t *); extern int zfs_shareall(zfs_handle_t *); extern int zfs_unshare_nfs(zfs_handle_t *, const char *); extern int zfs_unshare_smb(zfs_handle_t *, const char *); extern int zfs_unshareall_nfs(zfs_handle_t *); extern int zfs_unshareall_smb(zfs_handle_t *); extern int zfs_unshareall_bypath(zfs_handle_t *, const char *); extern int zfs_unshareall_bytype(zfs_handle_t *, const char *, const char *); extern int zfs_unshareall(zfs_handle_t *); extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, char *, void *, void *, int, zfs_share_op_t); extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *); /* * Utility functions to run an external process. */ #define STDOUT_VERBOSE 0x01 #define STDERR_VERBOSE 0x02 #define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to lookup the command */ int libzfs_run_process(const char *, char **, int flags); int libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[], char **lines[], int *lines_cnt); int libzfs_run_process_get_stdout_nopath(const char *path, char *argv[], char *env[], char **lines[], int *lines_cnt); void libzfs_free_str_array(char **strs, int count); int libzfs_envvar_is_set(char *envvar); /* * Given a device or file, determine if it is part of a pool. */ extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **, boolean_t *); /* * Label manipulation. */ extern int zpool_clear_label(int); /* * Management interfaces for SMB ACL files */ int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *); int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *); int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *); int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *, char *); /* * Enable and disable datasets within a pool by mounting/unmounting and * sharing/unsharing them. */ extern int zpool_enable_datasets(zpool_handle_t *, const char *, int); extern int zpool_disable_datasets(zpool_handle_t *, boolean_t); extern int zfs_remap_indirects(libzfs_handle_t *hdl, const char *); #ifdef __cplusplus } #endif #endif /* _LIBZFS_H */ diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h index 945853739b7c..395d2e27f20f 100644 --- a/include/sys/fs/zfs.h +++ b/include/sys/fs/zfs.h @@ -1,1421 +1,1422 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright 2011 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2013, 2017 Joyent, Inc. All rights reserved. 
* Copyright (c) 2014 Integros [integros.com] * Copyright (c) 2017 Datto Inc. * Copyright (c) 2017, Intel Corporation. */ /* Portions Copyright 2010 Robert Milkowski */ #ifndef _SYS_FS_ZFS_H #define _SYS_FS_ZFS_H #include #include #ifdef __cplusplus extern "C" { #endif /* * Types and constants shared between userland and the kernel. */ /* * Each dataset can be one of the following types. These constants can be * combined into masks that can be passed to various functions. */ typedef enum { ZFS_TYPE_FILESYSTEM = (1 << 0), ZFS_TYPE_SNAPSHOT = (1 << 1), ZFS_TYPE_VOLUME = (1 << 2), ZFS_TYPE_POOL = (1 << 3), ZFS_TYPE_BOOKMARK = (1 << 4) } zfs_type_t; /* * NB: lzc_dataset_type should be updated whenever a new objset type is added, * if it represents a real type of a dataset that can be created from userland. */ typedef enum dmu_objset_type { DMU_OST_NONE, DMU_OST_META, DMU_OST_ZFS, DMU_OST_ZVOL, DMU_OST_OTHER, /* For testing only! */ DMU_OST_ANY, /* Be careful! */ DMU_OST_NUMTYPES } dmu_objset_type_t; #define ZFS_TYPE_DATASET \ (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME | ZFS_TYPE_SNAPSHOT) /* * All of these include the terminating NUL byte. */ #define ZAP_MAXNAMELEN 256 #define ZAP_MAXVALUELEN (1024 * 8) #define ZAP_OLDMAXVALUELEN 1024 #define ZFS_MAX_DATASET_NAME_LEN 256 /* * Dataset properties are identified by these constants and must be added to * the end of this list to ensure that external consumers are not affected * by the change. If you make any changes to this list, be sure to update * the property table in module/zcommon/zfs_prop.c. */ typedef enum { ZPROP_CONT = -2, ZPROP_INVAL = -1, ZFS_PROP_TYPE = 0, ZFS_PROP_CREATION, ZFS_PROP_USED, ZFS_PROP_AVAILABLE, ZFS_PROP_REFERENCED, ZFS_PROP_COMPRESSRATIO, ZFS_PROP_MOUNTED, ZFS_PROP_ORIGIN, ZFS_PROP_QUOTA, ZFS_PROP_RESERVATION, ZFS_PROP_VOLSIZE, ZFS_PROP_VOLBLOCKSIZE, ZFS_PROP_RECORDSIZE, ZFS_PROP_MOUNTPOINT, ZFS_PROP_SHARENFS, ZFS_PROP_CHECKSUM, ZFS_PROP_COMPRESSION, ZFS_PROP_ATIME, ZFS_PROP_DEVICES, ZFS_PROP_EXEC, ZFS_PROP_SETUID, ZFS_PROP_READONLY, ZFS_PROP_ZONED, ZFS_PROP_SNAPDIR, ZFS_PROP_PRIVATE, /* not exposed to user, temporary */ ZFS_PROP_ACLINHERIT, ZFS_PROP_CREATETXG, ZFS_PROP_NAME, /* not exposed to the user */ ZFS_PROP_CANMOUNT, ZFS_PROP_ISCSIOPTIONS, /* not exposed to the user */ ZFS_PROP_XATTR, ZFS_PROP_NUMCLONES, /* not exposed to the user */ ZFS_PROP_COPIES, ZFS_PROP_VERSION, ZFS_PROP_UTF8ONLY, ZFS_PROP_NORMALIZE, ZFS_PROP_CASE, ZFS_PROP_VSCAN, ZFS_PROP_NBMAND, ZFS_PROP_SHARESMB, ZFS_PROP_REFQUOTA, ZFS_PROP_REFRESERVATION, ZFS_PROP_GUID, ZFS_PROP_PRIMARYCACHE, ZFS_PROP_SECONDARYCACHE, ZFS_PROP_USEDSNAP, ZFS_PROP_USEDDS, ZFS_PROP_USEDCHILD, ZFS_PROP_USEDREFRESERV, ZFS_PROP_USERACCOUNTING, /* not exposed to the user */ ZFS_PROP_STMF_SHAREINFO, /* not exposed to the user */ ZFS_PROP_DEFER_DESTROY, ZFS_PROP_USERREFS, ZFS_PROP_LOGBIAS, ZFS_PROP_UNIQUE, /* not exposed to the user */ ZFS_PROP_OBJSETID, ZFS_PROP_DEDUP, ZFS_PROP_MLSLABEL, ZFS_PROP_SYNC, ZFS_PROP_DNODESIZE, ZFS_PROP_REFRATIO, ZFS_PROP_WRITTEN, ZFS_PROP_CLONES, ZFS_PROP_LOGICALUSED, ZFS_PROP_LOGICALREFERENCED, ZFS_PROP_INCONSISTENT, /* not exposed to the user */ ZFS_PROP_VOLMODE, ZFS_PROP_FILESYSTEM_LIMIT, ZFS_PROP_SNAPSHOT_LIMIT, ZFS_PROP_FILESYSTEM_COUNT, ZFS_PROP_SNAPSHOT_COUNT, ZFS_PROP_SNAPDEV, ZFS_PROP_ACLTYPE, ZFS_PROP_SELINUX_CONTEXT, ZFS_PROP_SELINUX_FSCONTEXT, ZFS_PROP_SELINUX_DEFCONTEXT, ZFS_PROP_SELINUX_ROOTCONTEXT, ZFS_PROP_RELATIME, ZFS_PROP_REDUNDANT_METADATA, ZFS_PROP_OVERLAY, ZFS_PROP_PREV_SNAP, ZFS_PROP_RECEIVE_RESUME_TOKEN, ZFS_PROP_ENCRYPTION, 
ZFS_PROP_KEYLOCATION, ZFS_PROP_KEYFORMAT, ZFS_PROP_PBKDF2_SALT, ZFS_PROP_PBKDF2_ITERS, ZFS_PROP_ENCRYPTION_ROOT, ZFS_PROP_KEY_GUID, ZFS_PROP_KEYSTATUS, ZFS_PROP_REMAPTXG, /* not exposed to the user */ ZFS_PROP_SPECIAL_SMALL_BLOCKS, ZFS_NUM_PROPS } zfs_prop_t; typedef enum { ZFS_PROP_USERUSED, ZFS_PROP_USERQUOTA, ZFS_PROP_GROUPUSED, ZFS_PROP_GROUPQUOTA, ZFS_PROP_USEROBJUSED, ZFS_PROP_USEROBJQUOTA, ZFS_PROP_GROUPOBJUSED, ZFS_PROP_GROUPOBJQUOTA, ZFS_PROP_PROJECTUSED, ZFS_PROP_PROJECTQUOTA, ZFS_PROP_PROJECTOBJUSED, ZFS_PROP_PROJECTOBJQUOTA, ZFS_NUM_USERQUOTA_PROPS } zfs_userquota_prop_t; extern const char *zfs_userquota_prop_prefixes[ZFS_NUM_USERQUOTA_PROPS]; /* * Pool properties are identified by these constants and must be added to the * end of this list to ensure that external consumers are not affected * by the change. If you make any changes to this list, be sure to update * the property table in module/zcommon/zpool_prop.c. */ typedef enum { ZPOOL_PROP_INVAL = -1, ZPOOL_PROP_NAME, ZPOOL_PROP_SIZE, ZPOOL_PROP_CAPACITY, ZPOOL_PROP_ALTROOT, ZPOOL_PROP_HEALTH, ZPOOL_PROP_GUID, ZPOOL_PROP_VERSION, ZPOOL_PROP_BOOTFS, ZPOOL_PROP_DELEGATION, ZPOOL_PROP_AUTOREPLACE, ZPOOL_PROP_CACHEFILE, ZPOOL_PROP_FAILUREMODE, ZPOOL_PROP_LISTSNAPS, ZPOOL_PROP_AUTOEXPAND, ZPOOL_PROP_DEDUPDITTO, ZPOOL_PROP_DEDUPRATIO, ZPOOL_PROP_FREE, ZPOOL_PROP_ALLOCATED, ZPOOL_PROP_READONLY, ZPOOL_PROP_ASHIFT, ZPOOL_PROP_COMMENT, ZPOOL_PROP_EXPANDSZ, ZPOOL_PROP_FREEING, ZPOOL_PROP_FRAGMENTATION, ZPOOL_PROP_LEAKED, ZPOOL_PROP_MAXBLOCKSIZE, ZPOOL_PROP_TNAME, ZPOOL_PROP_MAXDNODESIZE, ZPOOL_PROP_MULTIHOST, ZPOOL_PROP_CHECKPOINT, ZPOOL_PROP_LOAD_GUID, ZPOOL_NUM_PROPS } zpool_prop_t; /* Small enough to not hog a whole line of printout in zpool(1M). */ #define ZPROP_MAX_COMMENT 32 #define ZPROP_VALUE "value" #define ZPROP_SOURCE "source" typedef enum { ZPROP_SRC_NONE = 0x1, ZPROP_SRC_DEFAULT = 0x2, ZPROP_SRC_TEMPORARY = 0x4, ZPROP_SRC_LOCAL = 0x8, ZPROP_SRC_INHERITED = 0x10, ZPROP_SRC_RECEIVED = 0x20 } zprop_source_t; #define ZPROP_SRC_ALL 0x3f #define ZPROP_SOURCE_VAL_RECVD "$recvd" #define ZPROP_N_MORE_ERRORS "N_MORE_ERRORS" /* * Dataset flag implemented as a special entry in the props zap object * indicating that the dataset has received properties on or after * SPA_VERSION_RECVD_PROPS. The first such receive blows away local properties * just as it did in earlier versions, and thereafter, local properties are * preserved. */ #define ZPROP_HAS_RECVD "$hasrecvd" typedef enum { ZPROP_ERR_NOCLEAR = 0x1, /* failure to clear existing props */ ZPROP_ERR_NORESTORE = 0x2 /* failure to restore props on error */ } zprop_errflags_t; typedef int (*zprop_func)(int, void *); /* * Properties to be set on the root file system of a new pool * are stuffed into their own nvlist, which is then included in * the properties nvlist with the pool properties. */ #define ZPOOL_ROOTFS_PROPS "root-props-nvl" /* * Length of 'written@' and 'written#' */ #define ZFS_WRITTEN_PROP_PREFIX_LEN 8 /* * Dataset property functions shared between libzfs and kernel. 
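 *
 * Illustrative note: zfs_name_to_prop() and zfs_prop_to_name() below are
 * inverses for known properties, e.g. zfs_name_to_prop("compression")
 * yields ZFS_PROP_COMPRESSION and zfs_prop_to_name() maps it back to the
 * string "compression"; unknown names map to ZPROP_INVAL.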
*/ const char *zfs_prop_default_string(zfs_prop_t); uint64_t zfs_prop_default_numeric(zfs_prop_t); boolean_t zfs_prop_readonly(zfs_prop_t); boolean_t zfs_prop_visible(zfs_prop_t prop); boolean_t zfs_prop_inheritable(zfs_prop_t); boolean_t zfs_prop_setonce(zfs_prop_t); boolean_t zfs_prop_encryption_key_param(zfs_prop_t); boolean_t zfs_prop_valid_keylocation(const char *, boolean_t); const char *zfs_prop_to_name(zfs_prop_t); zfs_prop_t zfs_name_to_prop(const char *); boolean_t zfs_prop_user(const char *); boolean_t zfs_prop_userquota(const char *); boolean_t zfs_prop_written(const char *); int zfs_prop_index_to_string(zfs_prop_t, uint64_t, const char **); int zfs_prop_string_to_index(zfs_prop_t, const char *, uint64_t *); uint64_t zfs_prop_random_value(zfs_prop_t, uint64_t seed); boolean_t zfs_prop_valid_for_type(int, zfs_type_t, boolean_t); /* * Pool property functions shared between libzfs and kernel. */ zpool_prop_t zpool_name_to_prop(const char *); const char *zpool_prop_to_name(zpool_prop_t); const char *zpool_prop_default_string(zpool_prop_t); uint64_t zpool_prop_default_numeric(zpool_prop_t); boolean_t zpool_prop_readonly(zpool_prop_t); boolean_t zpool_prop_setonce(zpool_prop_t); boolean_t zpool_prop_feature(const char *); boolean_t zpool_prop_unsupported(const char *); int zpool_prop_index_to_string(zpool_prop_t, uint64_t, const char **); int zpool_prop_string_to_index(zpool_prop_t, const char *, uint64_t *); uint64_t zpool_prop_random_value(zpool_prop_t, uint64_t seed); /* * Definitions for the Delegation. */ typedef enum { ZFS_DELEG_WHO_UNKNOWN = 0, ZFS_DELEG_USER = 'u', ZFS_DELEG_USER_SETS = 'U', ZFS_DELEG_GROUP = 'g', ZFS_DELEG_GROUP_SETS = 'G', ZFS_DELEG_EVERYONE = 'e', ZFS_DELEG_EVERYONE_SETS = 'E', ZFS_DELEG_CREATE = 'c', ZFS_DELEG_CREATE_SETS = 'C', ZFS_DELEG_NAMED_SET = 's', ZFS_DELEG_NAMED_SET_SETS = 'S' } zfs_deleg_who_type_t; typedef enum { ZFS_DELEG_NONE = 0, ZFS_DELEG_PERM_LOCAL = 1, ZFS_DELEG_PERM_DESCENDENT = 2, ZFS_DELEG_PERM_LOCALDESCENDENT = 3, ZFS_DELEG_PERM_CREATE = 4 } zfs_deleg_inherit_t; #define ZFS_DELEG_PERM_UID "uid" #define ZFS_DELEG_PERM_GID "gid" #define ZFS_DELEG_PERM_GROUPS "groups" #define ZFS_MLSLABEL_DEFAULT "none" #define ZFS_SMB_ACL_SRC "src" #define ZFS_SMB_ACL_TARGET "target" typedef enum { ZFS_CANMOUNT_OFF = 0, ZFS_CANMOUNT_ON = 1, ZFS_CANMOUNT_NOAUTO = 2 } zfs_canmount_type_t; typedef enum { ZFS_LOGBIAS_LATENCY = 0, ZFS_LOGBIAS_THROUGHPUT = 1 } zfs_logbias_op_t; typedef enum zfs_share_op { ZFS_SHARE_NFS = 0, ZFS_UNSHARE_NFS = 1, ZFS_SHARE_SMB = 2, ZFS_UNSHARE_SMB = 3 } zfs_share_op_t; typedef enum zfs_smb_acl_op { ZFS_SMB_ACL_ADD, ZFS_SMB_ACL_REMOVE, ZFS_SMB_ACL_RENAME, ZFS_SMB_ACL_PURGE } zfs_smb_acl_op_t; typedef enum zfs_cache_type { ZFS_CACHE_NONE = 0, ZFS_CACHE_METADATA = 1, ZFS_CACHE_ALL = 2 } zfs_cache_type_t; typedef enum { ZFS_SYNC_STANDARD = 0, ZFS_SYNC_ALWAYS = 1, ZFS_SYNC_DISABLED = 2 } zfs_sync_type_t; typedef enum { ZFS_XATTR_OFF = 0, ZFS_XATTR_DIR = 1, ZFS_XATTR_SA = 2 } zfs_xattr_type_t; typedef enum { ZFS_DNSIZE_LEGACY = 0, ZFS_DNSIZE_AUTO = 1, ZFS_DNSIZE_1K = 1024, ZFS_DNSIZE_2K = 2048, ZFS_DNSIZE_4K = 4096, ZFS_DNSIZE_8K = 8192, ZFS_DNSIZE_16K = 16384 } zfs_dnsize_type_t; typedef enum { ZFS_REDUNDANT_METADATA_ALL, ZFS_REDUNDANT_METADATA_MOST } zfs_redundant_metadata_type_t; typedef enum { ZFS_VOLMODE_DEFAULT = 0, ZFS_VOLMODE_GEOM = 1, ZFS_VOLMODE_DEV = 2, ZFS_VOLMODE_NONE = 3 } zfs_volmode_t; typedef enum zfs_keystatus { ZFS_KEYSTATUS_NONE = 0, ZFS_KEYSTATUS_UNAVAILABLE, ZFS_KEYSTATUS_AVAILABLE, } zfs_keystatus_t; typedef 
enum zfs_keyformat { ZFS_KEYFORMAT_NONE = 0, ZFS_KEYFORMAT_RAW, ZFS_KEYFORMAT_HEX, ZFS_KEYFORMAT_PASSPHRASE, ZFS_KEYFORMAT_FORMATS } zfs_keyformat_t; typedef enum zfs_key_location { ZFS_KEYLOCATION_NONE = 0, ZFS_KEYLOCATION_PROMPT, ZFS_KEYLOCATION_URI, ZFS_KEYLOCATION_LOCATIONS } zfs_keylocation_t; #define DEFAULT_PBKDF2_ITERATIONS 350000 #define MIN_PBKDF2_ITERATIONS 100000 /* * On-disk version number. */ #define SPA_VERSION_1 1ULL #define SPA_VERSION_2 2ULL #define SPA_VERSION_3 3ULL #define SPA_VERSION_4 4ULL #define SPA_VERSION_5 5ULL #define SPA_VERSION_6 6ULL #define SPA_VERSION_7 7ULL #define SPA_VERSION_8 8ULL #define SPA_VERSION_9 9ULL #define SPA_VERSION_10 10ULL #define SPA_VERSION_11 11ULL #define SPA_VERSION_12 12ULL #define SPA_VERSION_13 13ULL #define SPA_VERSION_14 14ULL #define SPA_VERSION_15 15ULL #define SPA_VERSION_16 16ULL #define SPA_VERSION_17 17ULL #define SPA_VERSION_18 18ULL #define SPA_VERSION_19 19ULL #define SPA_VERSION_20 20ULL #define SPA_VERSION_21 21ULL #define SPA_VERSION_22 22ULL #define SPA_VERSION_23 23ULL #define SPA_VERSION_24 24ULL #define SPA_VERSION_25 25ULL #define SPA_VERSION_26 26ULL #define SPA_VERSION_27 27ULL #define SPA_VERSION_28 28ULL #define SPA_VERSION_5000 5000ULL /* * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*}, * and do the appropriate changes. Also bump the version number in * usr/src/grub/capability. */ #define SPA_VERSION SPA_VERSION_5000 #define SPA_VERSION_STRING "5000" /* * Symbolic names for the changes that caused a SPA_VERSION switch. * Used in the code when checking for presence or absence of a feature. * Feel free to define multiple symbolic names for each version if there * were multiple changes to on-disk structures during that version. * * NOTE: When checking the current SPA_VERSION in your code, be sure * to use spa_version() since it reports the version of the * last synced uberblock. Checking the in-flight version can * be dangerous in some cases. 
*/ #define SPA_VERSION_INITIAL SPA_VERSION_1 #define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2 #define SPA_VERSION_SPARES SPA_VERSION_3 #define SPA_VERSION_RAIDZ2 SPA_VERSION_3 #define SPA_VERSION_BPOBJ_ACCOUNT SPA_VERSION_3 #define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3 #define SPA_VERSION_DNODE_BYTES SPA_VERSION_3 #define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4 #define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5 #define SPA_VERSION_BOOTFS SPA_VERSION_6 #define SPA_VERSION_SLOGS SPA_VERSION_7 #define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8 #define SPA_VERSION_FUID SPA_VERSION_9 #define SPA_VERSION_REFRESERVATION SPA_VERSION_9 #define SPA_VERSION_REFQUOTA SPA_VERSION_9 #define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9 #define SPA_VERSION_L2CACHE SPA_VERSION_10 #define SPA_VERSION_NEXT_CLONES SPA_VERSION_11 #define SPA_VERSION_ORIGIN SPA_VERSION_11 #define SPA_VERSION_DSL_SCRUB SPA_VERSION_11 #define SPA_VERSION_SNAP_PROPS SPA_VERSION_12 #define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13 #define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14 #define SPA_VERSION_USERSPACE SPA_VERSION_15 #define SPA_VERSION_STMF_PROP SPA_VERSION_16 #define SPA_VERSION_RAIDZ3 SPA_VERSION_17 #define SPA_VERSION_USERREFS SPA_VERSION_18 #define SPA_VERSION_HOLES SPA_VERSION_19 #define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20 #define SPA_VERSION_DEDUP SPA_VERSION_21 #define SPA_VERSION_RECVD_PROPS SPA_VERSION_22 #define SPA_VERSION_SLIM_ZIL SPA_VERSION_23 #define SPA_VERSION_SA SPA_VERSION_24 #define SPA_VERSION_SCAN SPA_VERSION_25 #define SPA_VERSION_DIR_CLONES SPA_VERSION_26 #define SPA_VERSION_DEADLISTS SPA_VERSION_26 #define SPA_VERSION_FAST_SNAP SPA_VERSION_27 #define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28 #define SPA_VERSION_BEFORE_FEATURES SPA_VERSION_28 #define SPA_VERSION_FEATURES SPA_VERSION_5000 #define SPA_VERSION_IS_SUPPORTED(v) \ (((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \ ((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION)) /* * ZPL version - rev'd whenever an incompatible on-disk format change * occurs. This is independent of SPA/DMU/ZAP versioning. You must * also update the version_table[] and help message in zfs_prop.c. * * When changing, be sure to teach GRUB how to read the new format! 
* See usr/src/grub/grub-0.97/stage2/{zfs-include/,fsys_zfs*} */ #define ZPL_VERSION_1 1ULL #define ZPL_VERSION_2 2ULL #define ZPL_VERSION_3 3ULL #define ZPL_VERSION_4 4ULL #define ZPL_VERSION_5 5ULL #define ZPL_VERSION ZPL_VERSION_5 #define ZPL_VERSION_STRING "5" #define ZPL_VERSION_INITIAL ZPL_VERSION_1 #define ZPL_VERSION_DIRENT_TYPE ZPL_VERSION_2 #define ZPL_VERSION_FUID ZPL_VERSION_3 #define ZPL_VERSION_NORMALIZATION ZPL_VERSION_3 #define ZPL_VERSION_SYSATTR ZPL_VERSION_3 #define ZPL_VERSION_USERSPACE ZPL_VERSION_4 #define ZPL_VERSION_SA ZPL_VERSION_5 /* Rewind policy information */ #define ZPOOL_NO_REWIND 1 /* No policy - default behavior */ #define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */ #define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */ #define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */ #define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */ #define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */ #define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */ typedef struct zpool_load_policy { uint32_t zlp_rewind; /* rewind policy requested */ uint64_t zlp_maxmeta; /* max acceptable meta-data errors */ uint64_t zlp_maxdata; /* max acceptable data errors */ uint64_t zlp_txg; /* specific txg to load */ } zpool_load_policy_t; /* * The following are configuration names used in the nvlist describing a pool's * configuration. New on-disk names should be prefixed with ":" * (e.g. "org.open-zfs:") to avoid conflicting names being developed * independently. */ #define ZPOOL_CONFIG_VERSION "version" #define ZPOOL_CONFIG_POOL_NAME "name" #define ZPOOL_CONFIG_POOL_STATE "state" #define ZPOOL_CONFIG_POOL_TXG "txg" #define ZPOOL_CONFIG_POOL_GUID "pool_guid" #define ZPOOL_CONFIG_CREATE_TXG "create_txg" #define ZPOOL_CONFIG_TOP_GUID "top_guid" #define ZPOOL_CONFIG_VDEV_TREE "vdev_tree" #define ZPOOL_CONFIG_TYPE "type" #define ZPOOL_CONFIG_CHILDREN "children" #define ZPOOL_CONFIG_ID "id" #define ZPOOL_CONFIG_GUID "guid" #define ZPOOL_CONFIG_INDIRECT_OBJECT "com.delphix:indirect_object" #define ZPOOL_CONFIG_INDIRECT_BIRTHS "com.delphix:indirect_births" #define ZPOOL_CONFIG_PREV_INDIRECT_VDEV "com.delphix:prev_indirect_vdev" #define ZPOOL_CONFIG_PATH "path" #define ZPOOL_CONFIG_DEVID "devid" #define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array" #define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift" #define ZPOOL_CONFIG_ASHIFT "ashift" #define ZPOOL_CONFIG_ASIZE "asize" #define ZPOOL_CONFIG_DTL "DTL" #define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */ #define ZPOOL_CONFIG_REMOVAL_STATS "removal_stats" /* not stored on disk */ #define ZPOOL_CONFIG_CHECKPOINT_STATS "checkpoint_stats" /* not on disk */ #define ZPOOL_CONFIG_VDEV_STATS "vdev_stats" /* not stored on disk */ #define ZPOOL_CONFIG_INDIRECT_SIZE "indirect_size" /* not stored on disk */ /* container nvlist of extended stats */ #define ZPOOL_CONFIG_VDEV_STATS_EX "vdev_stats_ex" /* Active queue read/write stats */ #define ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE "vdev_sync_r_active_queue" #define ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE "vdev_sync_w_active_queue" #define ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE "vdev_async_r_active_queue" #define ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE "vdev_async_w_active_queue" #define ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE "vdev_async_scrub_active_queue" /* Queue sizes */ #define ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE "vdev_sync_r_pend_queue" #define ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE "vdev_sync_w_pend_queue" 
#define ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE "vdev_async_r_pend_queue" #define ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE "vdev_async_w_pend_queue" #define ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE "vdev_async_scrub_pend_queue" /* Latency read/write histogram stats */ #define ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO "vdev_tot_r_lat_histo" #define ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO "vdev_tot_w_lat_histo" #define ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO "vdev_disk_r_lat_histo" #define ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO "vdev_disk_w_lat_histo" #define ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO "vdev_sync_r_lat_histo" #define ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO "vdev_sync_w_lat_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO "vdev_async_r_lat_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO "vdev_async_w_lat_histo" #define ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO "vdev_scrub_histo" /* Request size histograms */ #define ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO "vdev_sync_ind_r_histo" #define ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO "vdev_sync_ind_w_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO "vdev_async_ind_r_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO "vdev_async_ind_w_histo" #define ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO "vdev_ind_scrub_histo" #define ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO "vdev_sync_agg_r_histo" #define ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO "vdev_sync_agg_w_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO "vdev_async_agg_r_histo" #define ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO "vdev_async_agg_w_histo" #define ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO "vdev_agg_scrub_histo" /* Number of slow IOs */ #define ZPOOL_CONFIG_VDEV_SLOW_IOS "vdev_slow_ios" /* vdev enclosure sysfs path */ #define ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path" #define ZPOOL_CONFIG_WHOLE_DISK "whole_disk" #define ZPOOL_CONFIG_ERRCOUNT "error_count" #define ZPOOL_CONFIG_NOT_PRESENT "not_present" #define ZPOOL_CONFIG_SPARES "spares" #define ZPOOL_CONFIG_IS_SPARE "is_spare" #define ZPOOL_CONFIG_NPARITY "nparity" #define ZPOOL_CONFIG_HOSTID "hostid" #define ZPOOL_CONFIG_HOSTNAME "hostname" #define ZPOOL_CONFIG_LOADED_TIME "initial_load_time" #define ZPOOL_CONFIG_UNSPARE "unspare" #define ZPOOL_CONFIG_PHYS_PATH "phys_path" #define ZPOOL_CONFIG_IS_LOG "is_log" #define ZPOOL_CONFIG_L2CACHE "l2cache" #define ZPOOL_CONFIG_HOLE_ARRAY "hole_array" #define ZPOOL_CONFIG_VDEV_CHILDREN "vdev_children" #define ZPOOL_CONFIG_IS_HOLE "is_hole" #define ZPOOL_CONFIG_DDT_HISTOGRAM "ddt_histogram" #define ZPOOL_CONFIG_DDT_OBJ_STATS "ddt_object_stats" #define ZPOOL_CONFIG_DDT_STATS "ddt_stats" #define ZPOOL_CONFIG_SPLIT "splitcfg" #define ZPOOL_CONFIG_ORIG_GUID "orig_guid" #define ZPOOL_CONFIG_SPLIT_GUID "split_guid" #define ZPOOL_CONFIG_SPLIT_LIST "guid_list" #define ZPOOL_CONFIG_REMOVING "removing" #define ZPOOL_CONFIG_RESILVER_TXG "resilver_txg" #define ZPOOL_CONFIG_COMMENT "comment" #define ZPOOL_CONFIG_SUSPENDED "suspended" /* not stored on disk */ #define ZPOOL_CONFIG_SUSPENDED_REASON "suspended_reason" /* not stored */ #define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */ #define ZPOOL_CONFIG_BOOTFS "bootfs" /* not stored on disk */ #define ZPOOL_CONFIG_MISSING_DEVICES "missing_vdevs" /* not stored on disk */ #define ZPOOL_CONFIG_LOAD_INFO "load_info" /* not stored on disk */ #define ZPOOL_CONFIG_REWIND_INFO "rewind_info" /* not stored on disk */ #define ZPOOL_CONFIG_UNSUP_FEAT "unsup_feat" /* not stored on disk */ #define ZPOOL_CONFIG_ENABLED_FEAT "enabled_feat" /* not stored on disk */ #define ZPOOL_CONFIG_CAN_RDONLY "can_rdonly" /* not stored on disk 
*/ #define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read" #define ZPOOL_CONFIG_FEATURE_STATS "feature_stats" /* not stored on disk */ #define ZPOOL_CONFIG_ERRATA "errata" /* not stored on disk */ #define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top" #define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf" #define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps" #define ZPOOL_CONFIG_RESILVER_DEFER "com.datto:resilver_defer" #define ZPOOL_CONFIG_CACHEFILE "cachefile" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_STATE "mmp_state" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_TXG "mmp_txg" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_HOSTNAME "mmp_hostname" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_HOSTID "mmp_hostid" /* not stored on disk */ #define ZPOOL_CONFIG_ALLOCATION_BIAS "alloc_bias" /* not stored on disk */ #define ZPOOL_CONFIG_EXPANSION_TIME "expansion_time" /* not stored */ /* * The persistent vdev state is stored as separate values rather than a single * 'vdev_state' entry. This is because a device can be in multiple states, such * as offline and degraded. */ #define ZPOOL_CONFIG_OFFLINE "offline" #define ZPOOL_CONFIG_FAULTED "faulted" #define ZPOOL_CONFIG_DEGRADED "degraded" #define ZPOOL_CONFIG_REMOVED "removed" #define ZPOOL_CONFIG_FRU "fru" #define ZPOOL_CONFIG_AUX_STATE "aux_state" /* Pool load policy parameters */ #define ZPOOL_LOAD_POLICY "load-policy" #define ZPOOL_LOAD_REWIND_POLICY "load-rewind-policy" #define ZPOOL_LOAD_REQUEST_TXG "load-request-txg" #define ZPOOL_LOAD_META_THRESH "load-meta-thresh" #define ZPOOL_LOAD_DATA_THRESH "load-data-thresh" /* Rewind data discovered */ #define ZPOOL_CONFIG_LOAD_TIME "rewind_txg_ts" #define ZPOOL_CONFIG_LOAD_DATA_ERRORS "verify_data_errors" #define ZPOOL_CONFIG_REWIND_TIME "seconds_of_rewind" #define VDEV_TYPE_ROOT "root" #define VDEV_TYPE_MIRROR "mirror" #define VDEV_TYPE_REPLACING "replacing" #define VDEV_TYPE_RAIDZ "raidz" #define VDEV_TYPE_DISK "disk" #define VDEV_TYPE_FILE "file" #define VDEV_TYPE_MISSING "missing" #define VDEV_TYPE_HOLE "hole" #define VDEV_TYPE_SPARE "spare" #define VDEV_TYPE_LOG "log" #define VDEV_TYPE_L2CACHE "l2cache" #define VDEV_TYPE_INDIRECT "indirect" /* VDEV_TOP_ZAP_* are used in top-level vdev ZAP objects. */ #define VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM \ "com.delphix:indirect_obsolete_sm" #define VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE \ "com.delphix:obsolete_counts_are_precise" #define VDEV_TOP_ZAP_POOL_CHECKPOINT_SM \ "com.delphix:pool_checkpoint_sm" #define VDEV_TOP_ZAP_ALLOCATION_BIAS \ "org.zfsonlinux:allocation_bias" /* vdev metaslab allocation bias */ #define VDEV_ALLOC_BIAS_LOG "log" #define VDEV_ALLOC_BIAS_SPECIAL "special" #define VDEV_ALLOC_BIAS_DEDUP "dedup" #define VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET \ "com.delphix:next_offset_to_initialize" #define VDEV_LEAF_ZAP_INITIALIZE_STATE \ "com.delphix:vdev_initialize_state" #define VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME \ "com.delphix:vdev_initialize_action_time" /* * This is needed in userland to report the minimum necessary device size. */ #define SPA_MINDEVSIZE (64ULL << 20) /* * Set if the fragmentation has not yet been calculated. This can happen * because the space maps have not been upgraded or the histogram feature * is not enabled. */ #define ZFS_FRAG_INVALID UINT64_MAX /* * The location of the pool configuration repository, shared between kernel and * userland. */ #define ZPOOL_CACHE "/etc/zfs/zpool.cache" /* * vdev states are ordered from least to most healthy. 
* A vdev that's CANT_OPEN or below is considered unusable. */ typedef enum vdev_state { VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */ VDEV_STATE_CLOSED, /* Not currently open */ VDEV_STATE_OFFLINE, /* Not allowed to open */ VDEV_STATE_REMOVED, /* Explicitly removed from system */ VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */ VDEV_STATE_FAULTED, /* External request to fault device */ VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */ VDEV_STATE_HEALTHY /* Presumed good */ } vdev_state_t; #define VDEV_STATE_ONLINE VDEV_STATE_HEALTHY /* * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field * of the vdev stats structure uses these constants to distinguish why. */ typedef enum vdev_aux { VDEV_AUX_NONE, /* no error */ VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */ VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */ VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */ VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */ VDEV_AUX_TOO_SMALL, /* vdev size is too small */ VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */ VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */ VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */ VDEV_AUX_UNSUP_FEAT, /* unsupported features */ VDEV_AUX_SPARED, /* hot spare used in another pool */ VDEV_AUX_ERR_EXCEEDED, /* too many errors */ VDEV_AUX_IO_FAILURE, /* experienced I/O failure */ VDEV_AUX_BAD_LOG, /* cannot read log chain(s) */ VDEV_AUX_EXTERNAL, /* external diagnosis or forced fault */ VDEV_AUX_SPLIT_POOL, /* vdev was split off into another pool */ VDEV_AUX_BAD_ASHIFT, /* vdev ashift is invalid */ VDEV_AUX_EXTERNAL_PERSIST, /* persistent forced fault */ VDEV_AUX_ACTIVE, /* vdev active on a different host */ VDEV_AUX_CHILDREN_OFFLINE, /* all children are offline */ } vdev_aux_t; /* * pool state. The following states are written to disk as part of the normal * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE, L2CACHE. The remaining * states are software abstractions used at various levels to communicate * pool state. */ typedef enum pool_state { POOL_STATE_ACTIVE = 0, /* In active use */ POOL_STATE_EXPORTED, /* Explicitly exported */ POOL_STATE_DESTROYED, /* Explicitly destroyed */ POOL_STATE_SPARE, /* Reserved for hot spare use */ POOL_STATE_L2CACHE, /* Level 2 ARC device */ POOL_STATE_UNINITIALIZED, /* Internal spa_t state */ POOL_STATE_UNAVAIL, /* Internal libzfs state */ POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */ } pool_state_t; /* * mmp state. The following states provide additional detail describing * why a pool couldn't be safely imported. */ typedef enum mmp_state { MMP_STATE_ACTIVE = 0, /* In active use */ MMP_STATE_INACTIVE, /* Inactive and safe to import */ MMP_STATE_NO_HOSTID /* System hostid is not set */ } mmp_state_t; /* * Scan Functions. */ typedef enum pool_scan_func { POOL_SCAN_NONE, POOL_SCAN_SCRUB, POOL_SCAN_RESILVER, POOL_SCAN_FUNCS } pool_scan_func_t; /* * Used to control scrub pause and resume. */ typedef enum pool_scrub_cmd { POOL_SCRUB_NORMAL = 0, POOL_SCRUB_PAUSE, POOL_SCRUB_FLAGS_END } pool_scrub_cmd_t; typedef enum { CS_NONE, CS_CHECKPOINT_EXISTS, CS_CHECKPOINT_DISCARDING, CS_NUM_STATES } checkpoint_state_t; typedef struct pool_checkpoint_stat { uint64_t pcs_state; /* checkpoint_state_t */ uint64_t pcs_start_time; /* time checkpoint/discard started */ uint64_t pcs_space; /* checkpointed space */ } pool_checkpoint_stat_t; /* * ZIO types. Needed to interpret vdev statistics below. 
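 *
 * (Editor's illustration, not part of the original header: vs_ops[] and
 * vs_bytes[] in the vdev_stat_t defined further below are indexed by these
 * zio_type_t values, e.g. to total the bytes a vdev has moved.)
 */
#if 0	/* illustrative sketch only */
static inline uint64_t
vdev_stat_total_bytes(const vdev_stat_t *vs)
{
	uint64_t total = 0;
	int t;

	/* Sum the per-I/O-type byte counters. */
	for (t = 0; t < ZIO_TYPES; t++)
		total += vs->vs_bytes[t];
	return (total);
}
#endif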
typedef enum zio_type {
	ZIO_TYPE_NULL = 0,
	ZIO_TYPE_READ,
	ZIO_TYPE_WRITE,
	ZIO_TYPE_FREE,
	ZIO_TYPE_CLAIM,
	ZIO_TYPE_IOCTL,
	ZIO_TYPES
} zio_type_t;

/*
 * Pool statistics.  Note: all fields should be 64-bit because this
 * is passed between kernel and userland as an nvlist uint64 array.
 */
typedef struct pool_scan_stat {
	/* values stored on disk */
	uint64_t	pss_func;	/* pool_scan_func_t */
	uint64_t	pss_state;	/* dsl_scan_state_t */
	uint64_t	pss_start_time;	/* scan start time */
	uint64_t	pss_end_time;	/* scan end time */
	uint64_t	pss_to_examine;	/* total bytes to scan */
	uint64_t	pss_examined;	/* total bytes located by scanner */
	uint64_t	pss_to_process;	/* total bytes to process */
	uint64_t	pss_processed;	/* total processed bytes */
	uint64_t	pss_errors;	/* scan errors */

	/* values not stored on disk */
	uint64_t	pss_pass_exam;	/* examined bytes per scan pass */
	uint64_t	pss_pass_start;	/* start time of a scan pass */
	uint64_t	pss_pass_scrub_pause; /* pause time of a scrub pass */
	/* cumulative time scrub spent paused, needed for rate calculation */
	uint64_t	pss_pass_scrub_spent_paused;
	uint64_t	pss_pass_issued; /* issued bytes per scan pass */
	uint64_t	pss_issued;	/* total bytes checked by scanner */
} pool_scan_stat_t;

typedef struct pool_removal_stat {
	uint64_t	prs_state;	/* dsl_scan_state_t */
	uint64_t	prs_removing_vdev;
	uint64_t	prs_start_time;
	uint64_t	prs_end_time;
	uint64_t	prs_to_copy;	/* bytes that need to be copied */
	uint64_t	prs_copied;	/* bytes copied so far */
	/*
	 * bytes of memory used for indirect mappings.
	 * This includes all removed vdevs.
	 */
	uint64_t	prs_mapping_memory;
} pool_removal_stat_t;

typedef enum dsl_scan_state {
	DSS_NONE,
	DSS_SCANNING,
	DSS_FINISHED,
	DSS_CANCELED,
	DSS_NUM_STATES
} dsl_scan_state_t;

/*
 * Errata described by http://zfsonlinux.org/msg/ZFS-8000-ER.  The ordering
 * of this enum must be maintained to ensure the errata identifiers map to
 * the correct documentation.  New errata may only be appended to the list
 * and must contain corresponding documentation at the above link.
 */
typedef enum zpool_errata {
	ZPOOL_ERRATA_NONE,
	ZPOOL_ERRATA_ZOL_2094_SCRUB,
	ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
	ZPOOL_ERRATA_ZOL_6845_ENCRYPTION,
} zpool_errata_t;

/*
 * Vdev statistics.  Note: all fields should be 64-bit because this
 * is passed between kernel and userland as an nvlist uint64 array.
 */
typedef struct vdev_stat {
	hrtime_t	vs_timestamp;		/* time since vdev load */
	uint64_t	vs_state;		/* vdev state */
	uint64_t	vs_aux;			/* see vdev_aux_t */
	uint64_t	vs_alloc;		/* space allocated */
	uint64_t	vs_space;		/* total capacity */
	uint64_t	vs_dspace;		/* deflated capacity */
	uint64_t	vs_rsize;		/* replaceable dev size */
	uint64_t	vs_esize;		/* expandable dev size */
	uint64_t	vs_ops[ZIO_TYPES];	/* operation count */
	uint64_t	vs_bytes[ZIO_TYPES];	/* bytes read/written */
	uint64_t	vs_read_errors;		/* read errors */
	uint64_t	vs_write_errors;	/* write errors */
	uint64_t	vs_checksum_errors;	/* checksum errors */
	uint64_t	vs_initialize_errors;	/* initializing errors */
	uint64_t	vs_self_healed;		/* self-healed bytes */
	uint64_t	vs_scan_removing;	/* removing? */
	uint64_t	vs_scan_processed;	/* scan processed bytes */
	uint64_t	vs_fragmentation;	/* device fragmentation */
	uint64_t	vs_initialize_bytes_done; /* bytes initialized */
	uint64_t	vs_initialize_bytes_est; /* total bytes to initialize */
	uint64_t	vs_initialize_state;	/* vdev_initializing_state_t */
	uint64_t	vs_initialize_action_time; /* time_t */
	uint64_t	vs_checkpoint_space;	/* checkpoint-consumed space */
	uint64_t	vs_resilver_deferred;	/* resilver deferred */
	uint64_t	vs_slow_ios;		/* slow IOs */
} vdev_stat_t;

/*
 * Extended stats
 *
 * These are stats which aren't included in the original iostat output.  For
 * convenience, they are grouped together in vdev_stat_ex, although each stat
 * is individually exported as an nvlist.
 */
typedef struct vdev_stat_ex {
	/* Number of ZIOs issued to disk and waiting to finish */
	uint64_t vsx_active_queue[ZIO_PRIORITY_NUM_QUEUEABLE];

	/* Number of ZIOs pending to be issued to disk */
	uint64_t vsx_pend_queue[ZIO_PRIORITY_NUM_QUEUEABLE];

	/*
	 * Below are the histograms for various latencies.  Buckets are in
	 * units of nanoseconds.
	 */

	/*
	 * 2^37 nanoseconds = 134s.  Timeouts will probably start kicking in
	 * before this.
	 */
#define	VDEV_L_HISTO_BUCKETS 37		/* Latency histo buckets */
#define	VDEV_RQ_HISTO_BUCKETS 25	/* Request size histo buckets */

	/* Amount of time in ZIO queue (ns) */
	uint64_t vsx_queue_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
	    [VDEV_L_HISTO_BUCKETS];

	/* Total ZIO latency (ns).  Includes queuing and disk access time */
	uint64_t vsx_total_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];

	/* Amount of time to read/write the disk (ns) */
	uint64_t vsx_disk_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];

	/* "lookup the bucket for a value" histogram macros */
#define	HISTO(val, buckets) (val != 0 ? MIN(highbit64(val) - 1, \
	    buckets - 1) : 0)
#define	L_HISTO(a) HISTO(a, VDEV_L_HISTO_BUCKETS)
#define	RQ_HISTO(a) HISTO(a, VDEV_RQ_HISTO_BUCKETS)

	/* Physical IO histogram */
	uint64_t vsx_ind_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
	    [VDEV_RQ_HISTO_BUCKETS];

	/* Delegated (aggregated) physical IO histogram */
	uint64_t vsx_agg_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
	    [VDEV_RQ_HISTO_BUCKETS];

} vdev_stat_ex_t;

/*
 * Initialize functions.
 */
typedef enum pool_initialize_func {
	POOL_INITIALIZE_DO,
	POOL_INITIALIZE_CANCEL,
	POOL_INITIALIZE_SUSPEND,
	POOL_INITIALIZE_FUNCS
} pool_initialize_func_t;

/*
 * DDT statistics.  Note: all fields should be 64-bit because this
 * is passed between kernel and userland as an nvlist uint64 array.
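 *
 * (Editor's illustration, not part of the original header: consumers such
 * as 'zpool status -D' fetch these structures from the pool config as raw
 * uint64 arrays; a hypothetical decoder for the DDT object summary:)
 */
#if 0	/* illustrative sketch only */
static int
ddt_object_from_config(nvlist_t *config, ddt_object_t *ddo)
{
	uint64_t *array;
	uint_t len;

	/* The array length must match the structure being decoded. */
	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
	    &array, &len) != 0 || len * sizeof (uint64_t) != sizeof (*ddo))
		return (EINVAL);
	(void) memcpy(ddo, array, sizeof (*ddo));
	return (0);
}
#endif
/*
 * DDT statistics, continued: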
*/ typedef struct ddt_object { uint64_t ddo_count; /* number of elements in ddt */ uint64_t ddo_dspace; /* size of ddt on disk */ uint64_t ddo_mspace; /* size of ddt in-core */ } ddt_object_t; typedef struct ddt_stat { uint64_t dds_blocks; /* blocks */ uint64_t dds_lsize; /* logical size */ uint64_t dds_psize; /* physical size */ uint64_t dds_dsize; /* deflated allocated size */ uint64_t dds_ref_blocks; /* referenced blocks */ uint64_t dds_ref_lsize; /* referenced lsize * refcnt */ uint64_t dds_ref_psize; /* referenced psize * refcnt */ uint64_t dds_ref_dsize; /* referenced dsize * refcnt */ } ddt_stat_t; typedef struct ddt_histogram { ddt_stat_t ddh_stat[64]; /* power-of-two histogram buckets */ } ddt_histogram_t; #define ZVOL_DRIVER "zvol" #define ZFS_DRIVER "zfs" #define ZFS_DEV "/dev/zfs" #define ZFS_SHARETAB "/etc/dfs/sharetab" #define ZFS_SUPER_MAGIC 0x2fc12fc1 /* general zvol path */ #define ZVOL_DIR "/dev" #define ZVOL_MAJOR 230 #define ZVOL_MINOR_BITS 4 #define ZVOL_MINOR_MASK ((1U << ZVOL_MINOR_BITS) - 1) #define ZVOL_MINORS (1 << 4) #define ZVOL_DEV_NAME "zd" #define ZVOL_PROP_NAME "name" #define ZVOL_DEFAULT_BLOCKSIZE 8192 typedef enum { VDEV_INITIALIZE_NONE, VDEV_INITIALIZE_ACTIVE, VDEV_INITIALIZE_CANCELED, VDEV_INITIALIZE_SUSPENDED, VDEV_INITIALIZE_COMPLETE } vdev_initializing_state_t; /* * /dev/zfs ioctl numbers. * * These numbers cannot change over time. New ioctl numbers must be appended. */ typedef enum zfs_ioc { /* * Illumos - 71/128 numbers reserved. */ ZFS_IOC_FIRST = ('Z' << 8), ZFS_IOC = ZFS_IOC_FIRST, ZFS_IOC_POOL_CREATE = ZFS_IOC_FIRST, /* 0x5a00 */ ZFS_IOC_POOL_DESTROY, /* 0x5a01 */ ZFS_IOC_POOL_IMPORT, /* 0x5a02 */ ZFS_IOC_POOL_EXPORT, /* 0x5a03 */ ZFS_IOC_POOL_CONFIGS, /* 0x5a04 */ ZFS_IOC_POOL_STATS, /* 0x5a05 */ ZFS_IOC_POOL_TRYIMPORT, /* 0x5a06 */ ZFS_IOC_POOL_SCAN, /* 0x5a07 */ ZFS_IOC_POOL_FREEZE, /* 0x5a08 */ ZFS_IOC_POOL_UPGRADE, /* 0x5a09 */ ZFS_IOC_POOL_GET_HISTORY, /* 0x5a0a */ ZFS_IOC_VDEV_ADD, /* 0x5a0b */ ZFS_IOC_VDEV_REMOVE, /* 0x5a0c */ ZFS_IOC_VDEV_SET_STATE, /* 0x5a0d */ ZFS_IOC_VDEV_ATTACH, /* 0x5a0e */ ZFS_IOC_VDEV_DETACH, /* 0x5a0f */ ZFS_IOC_VDEV_SETPATH, /* 0x5a10 */ ZFS_IOC_VDEV_SETFRU, /* 0x5a11 */ ZFS_IOC_OBJSET_STATS, /* 0x5a12 */ ZFS_IOC_OBJSET_ZPLPROPS, /* 0x5a13 */ ZFS_IOC_DATASET_LIST_NEXT, /* 0x5a14 */ ZFS_IOC_SNAPSHOT_LIST_NEXT, /* 0x5a15 */ ZFS_IOC_SET_PROP, /* 0x5a16 */ ZFS_IOC_CREATE, /* 0x5a17 */ ZFS_IOC_DESTROY, /* 0x5a18 */ ZFS_IOC_ROLLBACK, /* 0x5a19 */ ZFS_IOC_RENAME, /* 0x5a1a */ ZFS_IOC_RECV, /* 0x5a1b */ ZFS_IOC_SEND, /* 0x5a1c */ ZFS_IOC_INJECT_FAULT, /* 0x5a1d */ ZFS_IOC_CLEAR_FAULT, /* 0x5a1e */ ZFS_IOC_INJECT_LIST_NEXT, /* 0x5a1f */ ZFS_IOC_ERROR_LOG, /* 0x5a20 */ ZFS_IOC_CLEAR, /* 0x5a21 */ ZFS_IOC_PROMOTE, /* 0x5a22 */ ZFS_IOC_SNAPSHOT, /* 0x5a23 */ ZFS_IOC_DSOBJ_TO_DSNAME, /* 0x5a24 */ ZFS_IOC_OBJ_TO_PATH, /* 0x5a25 */ ZFS_IOC_POOL_SET_PROPS, /* 0x5a26 */ ZFS_IOC_POOL_GET_PROPS, /* 0x5a27 */ ZFS_IOC_SET_FSACL, /* 0x5a28 */ ZFS_IOC_GET_FSACL, /* 0x5a29 */ ZFS_IOC_SHARE, /* 0x5a2a */ ZFS_IOC_INHERIT_PROP, /* 0x5a2b */ ZFS_IOC_SMB_ACL, /* 0x5a2c */ ZFS_IOC_USERSPACE_ONE, /* 0x5a2d */ ZFS_IOC_USERSPACE_MANY, /* 0x5a2e */ ZFS_IOC_USERSPACE_UPGRADE, /* 0x5a2f */ ZFS_IOC_HOLD, /* 0x5a30 */ ZFS_IOC_RELEASE, /* 0x5a31 */ ZFS_IOC_GET_HOLDS, /* 0x5a32 */ ZFS_IOC_OBJSET_RECVD_PROPS, /* 0x5a33 */ ZFS_IOC_VDEV_SPLIT, /* 0x5a34 */ ZFS_IOC_NEXT_OBJ, /* 0x5a35 */ ZFS_IOC_DIFF, /* 0x5a36 */ ZFS_IOC_TMP_SNAPSHOT, /* 0x5a37 */ ZFS_IOC_OBJ_TO_STATS, /* 0x5a38 */ ZFS_IOC_SPACE_WRITTEN, /* 0x5a39 */ ZFS_IOC_SPACE_SNAPS, /* 0x5a3a */ 
ZFS_IOC_DESTROY_SNAPS, /* 0x5a3b */ ZFS_IOC_POOL_REGUID, /* 0x5a3c */ ZFS_IOC_POOL_REOPEN, /* 0x5a3d */ ZFS_IOC_SEND_PROGRESS, /* 0x5a3e */ ZFS_IOC_LOG_HISTORY, /* 0x5a3f */ ZFS_IOC_SEND_NEW, /* 0x5a40 */ ZFS_IOC_SEND_SPACE, /* 0x5a41 */ ZFS_IOC_CLONE, /* 0x5a42 */ ZFS_IOC_BOOKMARK, /* 0x5a43 */ ZFS_IOC_GET_BOOKMARKS, /* 0x5a44 */ ZFS_IOC_DESTROY_BOOKMARKS, /* 0x5a45 */ ZFS_IOC_CHANNEL_PROGRAM, /* 0x5a46 */ ZFS_IOC_RECV_NEW, /* 0x5a47 */ ZFS_IOC_POOL_SYNC, /* 0x5a48 */ ZFS_IOC_LOAD_KEY, /* 0x5a49 */ ZFS_IOC_UNLOAD_KEY, /* 0x5a4a */ ZFS_IOC_CHANGE_KEY, /* 0x5a4b */ ZFS_IOC_REMAP, /* 0x5a4c */ ZFS_IOC_POOL_CHECKPOINT, /* 0x5a4d */ ZFS_IOC_POOL_DISCARD_CHECKPOINT, /* 0x5a4e */ ZFS_IOC_POOL_INITIALIZE, /* 0x5a4f */ /* * Linux - 3/64 numbers reserved. */ ZFS_IOC_LINUX = ('Z' << 8) + 0x80, ZFS_IOC_EVENTS_NEXT, /* 0x5a81 */ ZFS_IOC_EVENTS_CLEAR, /* 0x5a82 */ ZFS_IOC_EVENTS_SEEK, /* 0x5a83 */ /* * FreeBSD - 1/64 numbers reserved. */ ZFS_IOC_FREEBSD = ('Z' << 8) + 0xC0, ZFS_IOC_LAST } zfs_ioc_t; /* * zvol ioctl to get dataset name */ #define BLKZNAME _IOR(0x12, 125, char[ZFS_MAX_DATASET_NAME_LEN]) /* * ZFS-specific error codes used for returning descriptive errors * to the userland through zfs ioctls. * * The enum implicitly includes all the error codes from errno.h. * New code should use and extend this enum for errors that are * not described precisely by generic errno codes. * * These numbers should not change over time. New entries should be appended. */ typedef enum { ZFS_ERR_CHECKPOINT_EXISTS = 1024, ZFS_ERR_DISCARDING_CHECKPOINT, ZFS_ERR_NO_CHECKPOINT, ZFS_ERR_DEVRM_IN_PROGRESS, ZFS_ERR_VDEV_TOO_BIG, ZFS_ERR_IOC_CMD_UNAVAIL, ZFS_ERR_IOC_ARG_UNAVAIL, ZFS_ERR_IOC_ARG_REQUIRED, ZFS_ERR_IOC_ARG_BADTYPE, + ZFS_ERR_WRONG_PARENT, } zfs_errno_t; /* * Internal SPA load state. Used by FMA diagnosis engine. */ typedef enum { SPA_LOAD_NONE, /* no load in progress */ SPA_LOAD_OPEN, /* normal open */ SPA_LOAD_IMPORT, /* import in progress */ SPA_LOAD_TRYIMPORT, /* tryimport in progress */ SPA_LOAD_RECOVER, /* recovery requested */ SPA_LOAD_ERROR, /* load failed */ SPA_LOAD_CREATE /* creation in progress */ } spa_load_state_t; /* * Bookmark name values. */ #define ZPOOL_ERR_LIST "error list" #define ZPOOL_ERR_DATASET "dataset" #define ZPOOL_ERR_OBJECT "object" #define HIS_MAX_RECORD_LEN (MAXPATHLEN + MAXPATHLEN + 1) /* * The following are names used in the nvlist describing * the pool's history log. */ #define ZPOOL_HIST_RECORD "history record" #define ZPOOL_HIST_TIME "history time" #define ZPOOL_HIST_CMD "history command" #define ZPOOL_HIST_WHO "history who" #define ZPOOL_HIST_ZONE "history zone" #define ZPOOL_HIST_HOST "history hostname" #define ZPOOL_HIST_TXG "history txg" #define ZPOOL_HIST_INT_EVENT "history internal event" #define ZPOOL_HIST_INT_STR "history internal str" #define ZPOOL_HIST_INT_NAME "internal_name" #define ZPOOL_HIST_IOCTL "ioctl" #define ZPOOL_HIST_INPUT_NVL "in_nvl" #define ZPOOL_HIST_OUTPUT_NVL "out_nvl" #define ZPOOL_HIST_DSNAME "dsname" #define ZPOOL_HIST_DSID "dsid" #define ZPOOL_HIST_ERRNO "errno" /* * Special nvlist name that will not have its args recorded in the pool's * history log. */ #define ZPOOL_HIDDEN_ARGS "hidden_args" /* * The following are names used when invoking ZFS_IOC_POOL_INITIALIZE. 
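 *
 * (Editor's note: ZPOOL_INITIALIZE_COMMAND carries one of the
 * pool_initialize_func_t values defined above.)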
*/ #define ZPOOL_INITIALIZE_COMMAND "initialize_command" #define ZPOOL_INITIALIZE_VDEVS "initialize_vdevs" /* * Flags for ZFS_IOC_VDEV_SET_STATE */ #define ZFS_ONLINE_CHECKREMOVE 0x1 #define ZFS_ONLINE_UNSPARE 0x2 #define ZFS_ONLINE_FORCEFAULT 0x4 #define ZFS_ONLINE_EXPAND 0x8 #define ZFS_OFFLINE_TEMPORARY 0x1 /* * Flags for ZFS_IOC_POOL_IMPORT */ #define ZFS_IMPORT_NORMAL 0x0 #define ZFS_IMPORT_VERBATIM 0x1 #define ZFS_IMPORT_ANY_HOST 0x2 #define ZFS_IMPORT_MISSING_LOG 0x4 #define ZFS_IMPORT_ONLY 0x8 #define ZFS_IMPORT_TEMP_NAME 0x10 #define ZFS_IMPORT_SKIP_MMP 0x20 #define ZFS_IMPORT_LOAD_KEYS 0x40 #define ZFS_IMPORT_CHECKPOINT 0x80 /* * Channel program argument/return nvlist keys and defaults. */ #define ZCP_ARG_PROGRAM "program" #define ZCP_ARG_ARGLIST "arg" #define ZCP_ARG_SYNC "sync" #define ZCP_ARG_INSTRLIMIT "instrlimit" #define ZCP_ARG_MEMLIMIT "memlimit" #define ZCP_ARG_CLIARGV "argv" #define ZCP_RET_ERROR "error" #define ZCP_RET_RETURN "return" #define ZCP_DEFAULT_INSTRLIMIT (10 * 1000 * 1000) #define ZCP_MAX_INSTRLIMIT (10 * ZCP_DEFAULT_INSTRLIMIT) #define ZCP_DEFAULT_MEMLIMIT (10 * 1024 * 1024) #define ZCP_MAX_MEMLIMIT (10 * ZCP_DEFAULT_MEMLIMIT) /* * Sysevent payload members. ZFS will generate the following sysevents with the * given payloads: * * ESC_ZFS_RESILVER_START * ESC_ZFS_RESILVER_END * ESC_ZFS_POOL_DESTROY * ESC_ZFS_POOL_REGUID * * ZFS_EV_POOL_NAME DATA_TYPE_STRING * ZFS_EV_POOL_GUID DATA_TYPE_UINT64 * * ESC_ZFS_VDEV_REMOVE * ESC_ZFS_VDEV_CLEAR * ESC_ZFS_VDEV_CHECK * * ZFS_EV_POOL_NAME DATA_TYPE_STRING * ZFS_EV_POOL_GUID DATA_TYPE_UINT64 * ZFS_EV_VDEV_PATH DATA_TYPE_STRING (optional) * ZFS_EV_VDEV_GUID DATA_TYPE_UINT64 * * ESC_ZFS_HISTORY_EVENT * * ZFS_EV_POOL_NAME DATA_TYPE_STRING * ZFS_EV_POOL_GUID DATA_TYPE_UINT64 * ZFS_EV_HIST_TIME DATA_TYPE_UINT64 (optional) * ZFS_EV_HIST_CMD DATA_TYPE_STRING (optional) * ZFS_EV_HIST_WHO DATA_TYPE_UINT64 (optional) * ZFS_EV_HIST_ZONE DATA_TYPE_STRING (optional) * ZFS_EV_HIST_HOST DATA_TYPE_STRING (optional) * ZFS_EV_HIST_TXG DATA_TYPE_UINT64 (optional) * ZFS_EV_HIST_INT_EVENT DATA_TYPE_UINT64 (optional) * ZFS_EV_HIST_INT_STR DATA_TYPE_STRING (optional) * ZFS_EV_HIST_INT_NAME DATA_TYPE_STRING (optional) * ZFS_EV_HIST_IOCTL DATA_TYPE_STRING (optional) * ZFS_EV_HIST_DSNAME DATA_TYPE_STRING (optional) * ZFS_EV_HIST_DSID DATA_TYPE_UINT64 (optional) * * The ZFS_EV_HIST_* members will correspond to the ZPOOL_HIST_* members in the * history log nvlist. The keynames will be free of any spaces or other * characters that could be potentially unexpected to consumers of the * sysevents. 
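 *
 * (Editor's illustration, not part of the original header: a hypothetical
 * sysevent consumer reading one of these members:)
 */
#if 0	/* illustrative sketch only */
static void
print_event_pool(nvlist_t *payload)
{
	char *name;

	/* ZFS_EV_POOL_NAME is defined just below. */
	if (nvlist_lookup_string(payload, ZFS_EV_POOL_NAME, &name) == 0)
		(void) printf("pool: %s\n", name);
}
#endif
/*
 * Sysevent payload member names: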
*/ #define ZFS_EV_POOL_NAME "pool_name" #define ZFS_EV_POOL_GUID "pool_guid" #define ZFS_EV_VDEV_PATH "vdev_path" #define ZFS_EV_VDEV_GUID "vdev_guid" #define ZFS_EV_HIST_TIME "history_time" #define ZFS_EV_HIST_CMD "history_command" #define ZFS_EV_HIST_WHO "history_who" #define ZFS_EV_HIST_ZONE "history_zone" #define ZFS_EV_HIST_HOST "history_hostname" #define ZFS_EV_HIST_TXG "history_txg" #define ZFS_EV_HIST_INT_EVENT "history_internal_event" #define ZFS_EV_HIST_INT_STR "history_internal_str" #define ZFS_EV_HIST_INT_NAME "history_internal_name" #define ZFS_EV_HIST_IOCTL "history_ioctl" #define ZFS_EV_HIST_DSNAME "history_dsname" #define ZFS_EV_HIST_DSID "history_dsid" #ifdef __cplusplus } #endif #endif /* _SYS_FS_ZFS_H */ diff --git a/lib/libzfs/libzfs_dataset.c b/lib/libzfs/libzfs_dataset.c index 6445c9d7acf3..be86e5692b0d 100644 --- a/lib/libzfs/libzfs_dataset.c +++ b/lib/libzfs/libzfs_dataset.c @@ -1,5392 +1,5387 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, Joyent, Inc. All rights reserved. * Copyright (c) 2011, 2017 by Delphix. All rights reserved. * Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved. * Copyright (c) 2012 Pawel Jakub Dawidek . * Copyright (c) 2013 Martin Matuska. All rights reserved. * Copyright (c) 2013 Steven Hartland. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. * Copyright 2016 Igor Kozhukhov * Copyright 2017-2018 RackTop Systems. * Copyright (c) 2018 Datto Inc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_IDMAP #include #include #include #endif /* HAVE_IDMAP */ #include #include #include #include #include #include #include "zfs_namecheck.h" #include "zfs_prop.h" #include "libzfs_impl.h" #include "libzfs.h" #include "zfs_deleg.h" static int userquota_propname_decode(const char *propname, boolean_t zoned, zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp); /* * Given a single type (not a mask of types), return the type in a human * readable form. */ const char * zfs_type_to_name(zfs_type_t type) { switch (type) { case ZFS_TYPE_FILESYSTEM: return (dgettext(TEXT_DOMAIN, "filesystem")); case ZFS_TYPE_SNAPSHOT: return (dgettext(TEXT_DOMAIN, "snapshot")); case ZFS_TYPE_VOLUME: return (dgettext(TEXT_DOMAIN, "volume")); case ZFS_TYPE_POOL: return (dgettext(TEXT_DOMAIN, "pool")); case ZFS_TYPE_BOOKMARK: return (dgettext(TEXT_DOMAIN, "bookmark")); default: assert(!"unhandled zfs_type_t"); } return (NULL); } /* * Validate a ZFS path. This is used even before trying to open the dataset, to * provide a more meaningful error message. 
We call zfs_error_aux() to * explain exactly why the name was not valid. */ int zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type, boolean_t modifying) { namecheck_err_t why; char what; if (entity_namecheck(path, &why, &what) != 0) { if (hdl != NULL) { switch (why) { case NAME_ERR_TOOLONG: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is too long")); break; case NAME_ERR_LEADING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "leading slash in name")); break; case NAME_ERR_EMPTY_COMPONENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "empty component in name")); break; case NAME_ERR_TRAILING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "trailing slash in name")); break; case NAME_ERR_INVALCHAR: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid character " "'%c' in name"), what); break; case NAME_ERR_MULTIPLE_DELIMITERS: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "multiple '@' and/or '#' delimiters in " "name")); break; case NAME_ERR_NOLETTER: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool doesn't begin with a letter")); break; case NAME_ERR_RESERVED: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is reserved")); break; case NAME_ERR_DISKLIKE: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "reserved disk name")); break; default: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "(%d) not defined"), why); break; } } return (0); } if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshot delimiter '@' is not expected here")); return (0); } if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing '@' delimiter in snapshot name, " "did you mean to use -r?")); return (0); } if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "bookmark delimiter '#' is not expected here")); return (0); } if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing '#' delimiter in bookmark name, " "did you mean to use -r?")); return (0); } if (modifying && strchr(path, '%') != NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid character %c in name"), '%'); return (0); } return (-1); } int zfs_name_valid(const char *name, zfs_type_t type) { if (type == ZFS_TYPE_POOL) return (zpool_name_valid(NULL, B_FALSE, name)); return (zfs_validate_name(NULL, name, type, B_FALSE)); } /* * This function takes the raw DSL properties, and filters out the user-defined * properties into a separate nvlist. 
*/ static nvlist_t * process_user_props(zfs_handle_t *zhp, nvlist_t *props) { libzfs_handle_t *hdl = zhp->zfs_hdl; nvpair_t *elem; nvlist_t *propval; nvlist_t *nvl; if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (NULL); } elem = NULL; while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { if (!zfs_prop_user(nvpair_name(elem))) continue; verify(nvpair_value_nvlist(elem, &propval) == 0); if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) { nvlist_free(nvl); (void) no_memory(hdl); return (NULL); } } return (nvl); } static zpool_handle_t * zpool_add_handle(zfs_handle_t *zhp, const char *pool_name) { libzfs_handle_t *hdl = zhp->zfs_hdl; zpool_handle_t *zph; if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) { if (hdl->libzfs_pool_handles != NULL) zph->zpool_next = hdl->libzfs_pool_handles; hdl->libzfs_pool_handles = zph; } return (zph); } static zpool_handle_t * zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len) { libzfs_handle_t *hdl = zhp->zfs_hdl; zpool_handle_t *zph = hdl->libzfs_pool_handles; while ((zph != NULL) && (strncmp(pool_name, zpool_get_name(zph), len) != 0)) zph = zph->zpool_next; return (zph); } /* * Returns a handle to the pool that contains the provided dataset. * If a handle to that pool already exists then that handle is returned. * Otherwise, a new handle is created and added to the list of handles. */ static zpool_handle_t * zpool_handle(zfs_handle_t *zhp) { char *pool_name; int len; zpool_handle_t *zph; len = strcspn(zhp->zfs_name, "/@#") + 1; pool_name = zfs_alloc(zhp->zfs_hdl, len); (void) strlcpy(pool_name, zhp->zfs_name, len); zph = zpool_find_handle(zhp, pool_name, len); if (zph == NULL) zph = zpool_add_handle(zhp, pool_name); free(pool_name); return (zph); } void zpool_free_handles(libzfs_handle_t *hdl) { zpool_handle_t *next, *zph = hdl->libzfs_pool_handles; while (zph != NULL) { next = zph->zpool_next; zpool_close(zph); zph = next; } hdl->libzfs_pool_handles = NULL; } /* * Utility function to gather stats (objset and zpl) for the given object. */ static int get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc) { libzfs_handle_t *hdl = zhp->zfs_hdl; (void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name)); while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) { if (errno == ENOMEM) { if (zcmd_expand_dst_nvlist(hdl, zc) != 0) { return (-1); } } else { return (-1); } } return (0); } /* * Utility function to get the received properties of the given object. */ static int get_recvd_props_ioctl(zfs_handle_t *zhp) { libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *recvdprops; zfs_cmd_t zc = {"\0"}; int err; if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) return (-1); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) { if (errno == ENOMEM) { if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { return (-1); } } else { zcmd_free_nvlists(&zc); return (-1); } } err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops); zcmd_free_nvlists(&zc); if (err != 0) return (-1); nvlist_free(zhp->zfs_recvd_props); zhp->zfs_recvd_props = recvdprops; return (0); } static int put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc) { nvlist_t *allprops, *userprops; zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */ if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) { return (-1); } /* * XXX Why do we store the user props separately, in addition to * storing them in zfs_props? 
*/ if ((userprops = process_user_props(zhp, allprops)) == NULL) { nvlist_free(allprops); return (-1); } nvlist_free(zhp->zfs_props); nvlist_free(zhp->zfs_user_props); zhp->zfs_props = allprops; zhp->zfs_user_props = userprops; return (0); } static int get_stats(zfs_handle_t *zhp) { int rc = 0; zfs_cmd_t zc = {"\0"}; if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) return (-1); if (get_stats_ioctl(zhp, &zc) != 0) rc = -1; else if (put_stats_zhdl(zhp, &zc) != 0) rc = -1; zcmd_free_nvlists(&zc); return (rc); } /* * Refresh the properties currently stored in the handle. */ void zfs_refresh_properties(zfs_handle_t *zhp) { (void) get_stats(zhp); } /* * Makes a handle from the given dataset name. Used by zfs_open() and * zfs_iter_* to create child handles on the fly. */ static int make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc) { if (put_stats_zhdl(zhp, zc) != 0) return (-1); /* * We've managed to open the dataset and gather statistics. Determine * the high-level type. */ if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) zhp->zfs_head_type = ZFS_TYPE_VOLUME; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM; else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER) return (-1); else abort(); if (zhp->zfs_dmustats.dds_is_snapshot) zhp->zfs_type = ZFS_TYPE_SNAPSHOT; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) zhp->zfs_type = ZFS_TYPE_VOLUME; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) zhp->zfs_type = ZFS_TYPE_FILESYSTEM; else abort(); /* we should never see any other types */ if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) return (-1); return (0); } zfs_handle_t * make_dataset_handle(libzfs_handle_t *hdl, const char *path) { zfs_cmd_t zc = {"\0"}; zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t)); if (zhp == NULL) return (NULL); zhp->zfs_hdl = hdl; (void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name)); if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) { free(zhp); return (NULL); } if (get_stats_ioctl(zhp, &zc) == -1) { zcmd_free_nvlists(&zc); free(zhp); return (NULL); } if (make_dataset_handle_common(zhp, &zc) == -1) { free(zhp); zhp = NULL; } zcmd_free_nvlists(&zc); return (zhp); } zfs_handle_t * make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc) { zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t)); if (zhp == NULL) return (NULL); zhp->zfs_hdl = hdl; (void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name)); if (make_dataset_handle_common(zhp, zc) == -1) { free(zhp); return (NULL); } return (zhp); } zfs_handle_t * make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc) { zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t)); if (zhp == NULL) return (NULL); zhp->zfs_hdl = pzhp->zfs_hdl; (void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name)); zhp->zfs_head_type = pzhp->zfs_type; zhp->zfs_type = ZFS_TYPE_SNAPSHOT; zhp->zpool_hdl = zpool_handle(zhp); return (zhp); } zfs_handle_t * zfs_handle_dup(zfs_handle_t *zhp_orig) { zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t)); if (zhp == NULL) return (NULL); zhp->zfs_hdl = zhp_orig->zfs_hdl; zhp->zpool_hdl = zhp_orig->zpool_hdl; (void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name, sizeof (zhp->zfs_name)); zhp->zfs_type = zhp_orig->zfs_type; zhp->zfs_head_type = zhp_orig->zfs_head_type; zhp->zfs_dmustats = zhp_orig->zfs_dmustats; if (zhp_orig->zfs_props != NULL) { if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) { (void) no_memory(zhp->zfs_hdl); zfs_close(zhp); return (NULL); } } if (zhp_orig->zfs_user_props != NULL) { if 
(nvlist_dup(zhp_orig->zfs_user_props,
		    &zhp->zfs_user_props, 0) != 0) {
			(void) no_memory(zhp->zfs_hdl);
			zfs_close(zhp);
			return (NULL);
		}
	}
	if (zhp_orig->zfs_recvd_props != NULL) {
		if (nvlist_dup(zhp_orig->zfs_recvd_props,
		    &zhp->zfs_recvd_props, 0)) {
			(void) no_memory(zhp->zfs_hdl);
			zfs_close(zhp);
			return (NULL);
		}
	}
	zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
	if (zhp_orig->zfs_mntopts != NULL) {
		zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
		    zhp_orig->zfs_mntopts);
	}
	zhp->zfs_props_table = zhp_orig->zfs_props_table;
	return (zhp);
}

boolean_t
zfs_bookmark_exists(const char *path)
{
	nvlist_t *bmarks;
	nvlist_t *props;
	char fsname[ZFS_MAX_DATASET_NAME_LEN];
	char *bmark_name;
	char *pound;
	int err;
	boolean_t rv;

	(void) strlcpy(fsname, path, sizeof (fsname));
	pound = strchr(fsname, '#');
	if (pound == NULL)
		return (B_FALSE);

	*pound = '\0';
	bmark_name = pound + 1;
	props = fnvlist_alloc();
	err = lzc_get_bookmarks(fsname, props, &bmarks);
	nvlist_free(props);
	if (err != 0) {
		nvlist_free(bmarks);
		return (B_FALSE);
	}

	rv = nvlist_exists(bmarks, bmark_name);
	nvlist_free(bmarks);

	return (rv);
}

zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
    nvlist_t *bmark_props)
{
	zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));

	if (zhp == NULL)
		return (NULL);

	/* Fill in the name. */
	zhp->zfs_hdl = parent->zfs_hdl;
	(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));

	/* Set the property lists. */
	if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
		free(zhp);
		return (NULL);
	}

	/* Set the types. */
	zhp->zfs_head_type = parent->zfs_head_type;
	zhp->zfs_type = ZFS_TYPE_BOOKMARK;

	if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
		nvlist_free(zhp->zfs_props);
		free(zhp);
		return (NULL);
	}

	return (zhp);
}

struct zfs_open_bookmarks_cb_data {
	const char *path;
	zfs_handle_t *zhp;
};

static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
	struct zfs_open_bookmarks_cb_data *dp = data;

	/*
	 * Is it the one we are looking for?
	 */
	if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
		/*
		 * We found it.  Save it and let the caller know we are done.
		 */
		dp->zhp = zhp;
		return (EEXIST);
	}

	/*
	 * Not found.  Close the handle and ask for another one.
	 */
	zfs_close(zhp);
	return (0);
}

/*
 * Opens the given snapshot, bookmark, filesystem, or volume.  The 'types'
 * argument is a mask of acceptable types.  The function will print an
 * appropriate error message and return NULL if it can't be opened.
 */
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
	zfs_handle_t *zhp;
	char errbuf[1024];
	char *bookp;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);

	/*
	 * Validate the name before we even try to open it.
	 */
	if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (NULL);
	}

	/*
	 * Bookmarks need to be handled separately.
	 */
	bookp = strchr(path, '#');
	if (bookp == NULL) {
		/*
		 * Try to get stats for the dataset, which will tell us if it
		 * exists.
		 */
		errno = 0;
		if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
			(void) zfs_standard_error(hdl, errno, errbuf);
			return (NULL);
		}
	} else {
		char dsname[ZFS_MAX_DATASET_NAME_LEN];
		zfs_handle_t *pzhp;
		struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};

		/*
		 * We need to cut out '#' and everything after '#'
		 * to get the parent dataset name only.
		 */
		assert(bookp - path < sizeof (dsname));
		(void) strncpy(dsname, path, bookp - path);
		dsname[bookp - path] = '\0';

		/*
		 * Create handle for the parent dataset.
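		 * (Editor's note: e.g. for "pool/fs#mark" the parent
		 * handle is opened on "pool/fs".)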
*/ errno = 0; if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) { (void) zfs_standard_error(hdl, errno, errbuf); return (NULL); } /* * Iterate bookmarks to find the right one. */ errno = 0; if ((zfs_iter_bookmarks(pzhp, zfs_open_bookmarks_cb, &cb_data) == 0) && (cb_data.zhp == NULL)) { (void) zfs_error(hdl, EZFS_NOENT, errbuf); zfs_close(pzhp); return (NULL); } if (cb_data.zhp == NULL) { (void) zfs_standard_error(hdl, errno, errbuf); zfs_close(pzhp); return (NULL); } zhp = cb_data.zhp; /* * Cleanup. */ zfs_close(pzhp); } if (!(types & zhp->zfs_type)) { (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); zfs_close(zhp); return (NULL); } return (zhp); } /* * Release a ZFS handle. Nothing to do but free the associated memory. */ void zfs_close(zfs_handle_t *zhp) { if (zhp->zfs_mntopts) free(zhp->zfs_mntopts); nvlist_free(zhp->zfs_props); nvlist_free(zhp->zfs_user_props); nvlist_free(zhp->zfs_recvd_props); free(zhp); } typedef struct mnttab_node { struct mnttab mtn_mt; avl_node_t mtn_node; } mnttab_node_t; static int libzfs_mnttab_cache_compare(const void *arg1, const void *arg2) { const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1; const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2; int rv; rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special); return (AVL_ISIGN(rv)); } void libzfs_mnttab_init(libzfs_handle_t *hdl) { pthread_mutex_init(&hdl->libzfs_mnttab_cache_lock, NULL); assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0); avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare, sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node)); } int libzfs_mnttab_update(libzfs_handle_t *hdl) { struct mnttab entry; /* Reopen MNTTAB to prevent reading stale data from open file */ if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL) return (ENOENT); while (getmntent(hdl->libzfs_mnttab, &entry) == 0) { mnttab_node_t *mtn; avl_index_t where; if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) continue; mtn = zfs_alloc(hdl, sizeof (mnttab_node_t)); mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special); mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp); mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype); mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts); /* Exclude duplicate mounts */ if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) { free(mtn->mtn_mt.mnt_special); free(mtn->mtn_mt.mnt_mountp); free(mtn->mtn_mt.mnt_fstype); free(mtn->mtn_mt.mnt_mntopts); free(mtn); continue; } avl_add(&hdl->libzfs_mnttab_cache, mtn); } return (0); } void libzfs_mnttab_fini(libzfs_handle_t *hdl) { void *cookie = NULL; mnttab_node_t *mtn; while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie)) != NULL) { free(mtn->mtn_mt.mnt_special); free(mtn->mtn_mt.mnt_mountp); free(mtn->mtn_mt.mnt_fstype); free(mtn->mtn_mt.mnt_mntopts); free(mtn); } avl_destroy(&hdl->libzfs_mnttab_cache); (void) pthread_mutex_destroy(&hdl->libzfs_mnttab_cache_lock); } void libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable) { hdl->libzfs_mnttab_enable = enable; } int libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname, struct mnttab *entry) { mnttab_node_t find; mnttab_node_t *mtn; int ret = ENOENT; if (!hdl->libzfs_mnttab_enable) { struct mnttab srch = { 0 }; if (avl_numnodes(&hdl->libzfs_mnttab_cache)) libzfs_mnttab_fini(hdl); /* Reopen MNTTAB to prevent reading stale data from open file */ if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL) return (ENOENT); srch.mnt_special = (char *)fsname; srch.mnt_fstype = MNTTYPE_ZFS; if 
(getmntany(hdl->libzfs_mnttab, entry, &srch) == 0) return (0); else return (ENOENT); } pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock); if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) { int error; if ((error = libzfs_mnttab_update(hdl)) != 0) { pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock); return (error); } } find.mtn_mt.mnt_special = (char *)fsname; mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL); if (mtn) { *entry = mtn->mtn_mt; ret = 0; } pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock); return (ret); } void libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special, const char *mountp, const char *mntopts) { mnttab_node_t *mtn; pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock); if (avl_numnodes(&hdl->libzfs_mnttab_cache) != 0) { mtn = zfs_alloc(hdl, sizeof (mnttab_node_t)); mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special); mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp); mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS); mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts); /* * Another thread may have already added this entry * via libzfs_mnttab_update. If so we should skip it. */ if (avl_find(&hdl->libzfs_mnttab_cache, mtn, NULL) != NULL) free(mtn); else avl_add(&hdl->libzfs_mnttab_cache, mtn); } pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock); } void libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname) { mnttab_node_t find; mnttab_node_t *ret; pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock); find.mtn_mt.mnt_special = (char *)fsname; if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL)) != NULL) { avl_remove(&hdl->libzfs_mnttab_cache, ret); free(ret->mtn_mt.mnt_special); free(ret->mtn_mt.mnt_mountp); free(ret->mtn_mt.mnt_fstype); free(ret->mtn_mt.mnt_mntopts); free(ret); } pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock); } int zfs_spa_version(zfs_handle_t *zhp, int *spa_version) { zpool_handle_t *zpool_handle = zhp->zpool_hdl; if (zpool_handle == NULL) return (-1); *spa_version = zpool_get_prop_int(zpool_handle, ZPOOL_PROP_VERSION, NULL); return (0); } /* * The choice of reservation property depends on the SPA version. */ static int zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop) { int spa_version; if (zfs_spa_version(zhp, &spa_version) < 0) return (-1); if (spa_version >= SPA_VERSION_REFRESERVATION) *resv_prop = ZFS_PROP_REFRESERVATION; else *resv_prop = ZFS_PROP_RESERVATION; return (0); } /* * Given an nvlist of properties to set, validates that they are correct, and * parses any numeric properties (index, boolean, etc) if they are specified as * strings. */ nvlist_t * zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl, uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl, boolean_t key_params_ok, const char *errbuf) { nvpair_t *elem; uint64_t intval; char *strval; zfs_prop_t prop; nvlist_t *ret; int chosen_normal = -1; int chosen_utf = -1; if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (NULL); } /* * Make sure this property is valid and applies to this type. */ elem = NULL; while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { const char *propname = nvpair_name(elem); prop = zfs_name_to_prop(propname); if (prop == ZPROP_INVAL && zfs_prop_user(propname)) { /* * This is a user property: make sure it's a * string, and that it's less than ZAP_MAXNAMELEN. 
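			 * (Editor's note: user property names are
			 * distinguished by containing a colon, e.g.
			 * "com.example:note".)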
			 */
			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property name '%s' is too long"),
				    propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (nvlist_add_string(ret, propname, strval) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Currently, only user properties can be modified on
		 * snapshots.
		 */
		if (type == ZFS_TYPE_SNAPSHOT) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "this property cannot be modified for snapshots"));
			(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
			goto error;
		}

		if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
			zfs_userquota_prop_t uqtype;
			char *newpropname = NULL;
			char domain[128];
			uint64_t rid;
			uint64_t valary[3];
			int rc;

			if (userquota_propname_decode(propname, zoned,
			    &uqtype, domain, sizeof (domain), &rid) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' has an invalid user/group name"),
				    propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (uqtype != ZFS_PROP_USERQUOTA &&
			    uqtype != ZFS_PROP_GROUPQUOTA &&
			    uqtype != ZFS_PROP_USEROBJQUOTA &&
			    uqtype != ZFS_PROP_GROUPOBJQUOTA &&
			    uqtype != ZFS_PROP_PROJECTQUOTA &&
			    uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is readonly"), propname);
				(void) zfs_error(hdl, EZFS_PROPREADONLY,
				    errbuf);
				goto error;
			}

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				(void) nvpair_value_string(elem, &strval);
				if (strcmp(strval, "none") == 0) {
					intval = 0;
				} else if (zfs_nicestrtonum(hdl, strval,
				    &intval) != 0) {
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				(void) nvpair_value_uint64(elem, &intval);
				if (intval == 0) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "use 'none' to disable "
					    "{user|group|project}quota"));
					goto error;
				}
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a number"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			/*
			 * Encode the prop name as
			 * userquota@<rid>-domain, to make it easy
			 * for the kernel to decode.
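			 * (Editor's example: user id 123 with an empty
			 * domain encodes as "userquota@7b-".)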
			 */
			rc = asprintf(&newpropname, "%s%llx-%s",
			    zfs_userquota_prop_prefixes[uqtype],
			    (longlong_t)rid, domain);
			if (rc == -1 || newpropname == NULL) {
				(void) no_memory(hdl);
				goto error;
			}

			valary[0] = uqtype;
			valary[1] = rid;
			valary[2] = intval;
			if (nvlist_add_uint64_array(ret, newpropname,
			    valary, 3) != 0) {
				free(newpropname);
				(void) no_memory(hdl);
				goto error;
			}
			free(newpropname);
			continue;
		} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "'%s' is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "'%s' does not "
			    "apply to datasets of this type"), propname);
			(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
			goto error;
		}

		if (zfs_prop_readonly(prop) &&
		    !(zfs_prop_setonce(prop) && zhp == NULL) &&
		    !(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "'%s' is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, type, ret,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform some additional checks for specific properties.
		 */
		switch (prop) {
		case ZFS_PROP_VERSION:
		{
			int version;

			if (zhp == NULL)
				break;
			version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
			if (intval < version) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "Cannot downgrade; already at version %u"),
				    version);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}

		case ZFS_PROP_VOLBLOCKSIZE:
		case ZFS_PROP_RECORDSIZE:
		{
			int maxbs = SPA_MAXBLOCKSIZE;
			char buf[64];

			if (zpool_hdl != NULL) {
				maxbs = zpool_get_prop_int(zpool_hdl,
				    ZPOOL_PROP_MAXBLOCKSIZE, NULL);
			}
			/*
			 * The value must be a power of two between
			 * SPA_MINBLOCKSIZE and maxbs.
			 */
			if (intval < SPA_MINBLOCKSIZE ||
			    intval > maxbs || !ISP2(intval)) {
				zfs_nicebytes(maxbs, buf, sizeof (buf));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be power of 2 from 512B "
				    "to %s"), propname, buf);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}

		case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
			if (zpool_hdl != NULL) {
				char state[64] = "";

				/*
				 * Issue a warning but do not fail so that
				 * tests for settable properties succeed.
				 */
				if (zpool_prop_get_feature(zpool_hdl,
				    "feature@allocation_classes", state,
				    sizeof (state)) != 0 ||
				    strcmp(state, ZFS_FEATURE_ACTIVE) != 0) {
					(void) fprintf(stderr, gettext(
					    "%s: property requires a special "
					    "device in the pool\n"), propname);
				}
			}
			if (intval != 0 &&
			    (intval < SPA_MINBLOCKSIZE ||
			    intval > SPA_OLD_MAXBLOCKSIZE || !ISP2(intval))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%d' property: must be zero "
				    "or a power of 2 from 512B to 128K"),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZFS_PROP_MLSLABEL:
		{
#ifdef HAVE_MLSLABEL
			/*
			 * Verify the mlslabel string and convert to
			 * internal hex label string.
			 */
			m_label_t *new_sl;
			char *hex = NULL;	/* internal label string */

			/* Default value is already OK.
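			 * (Editor's note: ZFS_MLSLABEL_DEFAULT is "none".)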
			 */
			if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
				break;

			/* Verify the label can be converted to binary form */
			if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
			    (str_to_label(strval, &new_sl, MAC_LABEL,
			    L_NO_CORRECTION, NULL) == -1)) {
				goto badlabel;
			}

			/* Now translate to hex internal label string */
			if (label_to_str(new_sl, &hex, M_INTERNAL,
			    DEF_NAMES) != 0) {
				if (hex)
					free(hex);
				goto badlabel;
			}
			m_label_free(new_sl);

			/* If string is already in internal form, we're done. */
			if (strcmp(strval, hex) == 0) {
				free(hex);
				break;
			}

			/* Replace the label string with the internal form. */
			(void) nvlist_remove(ret, zfs_prop_to_name(prop),
			    DATA_TYPE_STRING);
			verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
			    hex) == 0);
			free(hex);

			break;

badlabel:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid mlslabel '%s'"), strval);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			m_label_free(new_sl);	/* OK if null */
			goto error;
#else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "mlslabels are unsupported"));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
#endif /* HAVE_MLSLABEL */
		}

		case ZFS_PROP_MOUNTPOINT:
		{
			namecheck_err_t why;

			if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
			    strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
				break;

			if (mountpoint_namecheck(strval, &why)) {
				switch (why) {
				case NAME_ERR_LEADING_SLASH:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "'%s' must be an absolute path, "
					    "'none', or 'legacy'"), propname);
					break;
				case NAME_ERR_TOOLONG:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "component of '%s' is too long"),
					    propname);
					break;
				default:
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "(%d) not defined"), why);
					break;
				}
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
		}
		/*FALLTHRU*/

		case ZFS_PROP_SHARESMB:
		case ZFS_PROP_SHARENFS:
			/*
			 * For the mountpoint and sharenfs or sharesmb
			 * properties, check if it can be set in a
			 * global/non-global zone based on
			 * the zoned property value:
			 *
			 *		global zone	    non-global zone
			 * --------------------------------------------------
			 * zoned=on	mountpoint (no)	    mountpoint (yes)
			 *		sharenfs (no)	    sharenfs (no)
			 *		sharesmb (no)	    sharesmb (no)
			 *
			 * zoned=off	mountpoint (yes)	N/A
			 *		sharenfs (yes)
			 *		sharesmb (yes)
			 */
			if (zoned) {
				if (getzoneid() == GLOBAL_ZONEID) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "'%s' cannot be set on "
					    "dataset in a non-global zone"),
					    propname);
					(void) zfs_error(hdl, EZFS_ZONED,
					    errbuf);
					goto error;
				} else if (prop == ZFS_PROP_SHARENFS ||
				    prop == ZFS_PROP_SHARESMB) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "'%s' cannot be set in "
					    "a non-global zone"), propname);
					(void) zfs_error(hdl, EZFS_ZONED,
					    errbuf);
					goto error;
				}
			} else if (getzoneid() != GLOBAL_ZONEID) {
				/*
				 * If zoned property is 'off', this must be in
				 * a global zone.  If not, something is wrong.
				 */
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' cannot be set while dataset "
				    "'zoned' property is set"), propname);
				(void) zfs_error(hdl, EZFS_ZONED, errbuf);
				goto error;
			}

			/*
			 * At this point, it is legitimate to set the
			 * property.  Now we want to make sure that the
			 * property value is valid if it is sharenfs.
			 */
			if ((prop == ZFS_PROP_SHARENFS ||
			    prop == ZFS_PROP_SHARESMB) &&
			    strcmp(strval, "on") != 0 &&
			    strcmp(strval, "off") != 0) {
				zfs_share_proto_t proto;

				if (prop == ZFS_PROP_SHARESMB)
					proto = PROTO_SMB;
				else
					proto = PROTO_NFS;

				/*
				 * Must be a valid sharing protocol
				 * option string so init the libshare
				 * in order to enable the parser and
				 * then parse the options.
We use the * control API since we don't care about * the current configuration and don't * want the overhead of loading it * until we actually do something. */ if (zfs_init_libshare(hdl, SA_INIT_CONTROL_API) != SA_OK) { /* * An error occurred so we can't do * anything. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set: problem " "in share initialization"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (zfs_parse_options(strval, proto) != SA_OK) { /* * There was an error in parsing so * deal with it by issuing an error * message and leaving after * uninitializing the libshare * interface. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set to invalid " "options"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); zfs_uninit_libshare(hdl); goto error; } zfs_uninit_libshare(hdl); } break; case ZFS_PROP_KEYLOCATION: if (!zfs_prop_valid_keylocation(strval, B_FALSE)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid keylocation")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (zhp != NULL) { uint64_t crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION); if (crypt == ZIO_CRYPT_OFF && strcmp(strval, "none") != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "keylocation must be 'none' " "for unencrypted datasets")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } else if (crypt != ZIO_CRYPT_OFF && strcmp(strval, "none") == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "keylocation must not be 'none' " "for encrypted datasets")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } } break; case ZFS_PROP_PBKDF2_ITERS: if (intval < MIN_PBKDF2_ITERATIONS) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "minimum pbkdf2 iterations is %u"), MIN_PBKDF2_ITERATIONS); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; case ZFS_PROP_UTF8ONLY: chosen_utf = (int)intval; break; case ZFS_PROP_NORMALIZE: chosen_normal = (int)intval; break; default: break; } /* * For changes to existing volumes, we have some additional * checks to enforce. */ if (type == ZFS_TYPE_VOLUME && zhp != NULL) { uint64_t blocksize = zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE); char buf[64]; switch (prop) { case ZFS_PROP_VOLSIZE: if (intval % blocksize != 0) { zfs_nicebytes(blocksize, buf, sizeof (buf)); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a multiple of " "volume block size (%s)"), propname, buf); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (intval == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be zero"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; default: break; } } /* check encryption properties */ if (zhp != NULL) { int64_t crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION); switch (prop) { case ZFS_PROP_COPIES: if (crypt != ZIO_CRYPT_OFF && intval > 2) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "encrypted datasets cannot have " "3 copies")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; default: break; } } } /* * If normalization was chosen, but no UTF8 choice was made, * enforce rejection of non-UTF8 names. * * If normalization was chosen, but rejecting non-UTF8 names * was explicitly not chosen, it is an error.
*/ if (chosen_normal > 0 && chosen_utf < 0) { if (nvlist_add_uint64(ret, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) { (void) no_memory(hdl); goto error; } } else if (chosen_normal > 0 && chosen_utf == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be set 'on' if normalization chosen"), zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } return (ret); error: nvlist_free(ret); return (NULL); } int zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl) { uint64_t old_volsize; uint64_t new_volsize; uint64_t old_reservation; uint64_t new_reservation; zfs_prop_t resv_prop; nvlist_t *props; /* * If this is an existing volume, and someone is setting the volsize, * make sure that it matches the reservation, or add it if necessary. */ old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); if (zfs_which_resv_prop(zhp, &resv_prop) < 0) return (-1); old_reservation = zfs_prop_get_int(zhp, resv_prop); props = fnvlist_alloc(); fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE)); if ((zvol_volsize_to_reservation(old_volsize, props) != old_reservation) || nvlist_exists(nvl, zfs_prop_to_name(resv_prop))) { fnvlist_free(props); return (0); } if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &new_volsize) != 0) { fnvlist_free(props); return (-1); } new_reservation = zvol_volsize_to_reservation(new_volsize, props); fnvlist_free(props); if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop), new_reservation) != 0) { (void) no_memory(zhp->zfs_hdl); return (-1); } return (1); } /* * Helper for 'zfs {set|clone} refreservation=auto'. Must be called after * zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value. * Return codes must match zfs_add_synthetic_resv(). */ static int zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl) { uint64_t volsize; uint64_t resvsize; zfs_prop_t prop; nvlist_t *props; if (!ZFS_IS_VOLUME(zhp)) { return (0); } if (zfs_which_resv_prop(zhp, &prop) != 0) { return (-1); } if (prop != ZFS_PROP_REFRESERVATION) { return (0); } if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) { /* No value being set, so it can't be "auto" */ return (0); } if (resvsize != UINT64_MAX) { /* Being set to a value other than "auto" */ return (0); } props = fnvlist_alloc(); fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE)); if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0) { volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); } resvsize = zvol_volsize_to_reservation(volsize, props); fnvlist_free(props); (void) nvlist_remove_all(nvl, zfs_prop_to_name(prop)); if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) { (void) no_memory(zhp->zfs_hdl); return (-1); } return (1); } void zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err, char *errbuf) { switch (err) { case ENOSPC: /* * For quotas and reservations, ENOSPC indicates * something different; setting a quota or reservation * doesn't use any disk space.
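 * For example, `zfs set quota=10G <fs>` fails with ENOSPC when the
 * dataset already uses or reserves more than 10G, while
 * `zfs set reservation=10G <fs>` fails when less than 10G of pool
 * space is available; the messages below distinguish the two cases.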
*/ switch (prop) { case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "size is less than current used or " "reserved space")); (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); break; case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "size is greater than available space")); (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); break; default: (void) zfs_standard_error(hdl, err, errbuf); break; } break; case EBUSY: (void) zfs_standard_error(hdl, EBUSY, errbuf); break; case EROFS: (void) zfs_error(hdl, EZFS_DSREADONLY, errbuf); break; case E2BIG: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property value too long")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); break; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool and/or dataset must be upgraded to set this " "property or value")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case ERANGE: if (prop == ZFS_PROP_COMPRESSION || prop == ZFS_PROP_DNODESIZE || prop == ZFS_PROP_RECORDSIZE) { (void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property setting is not allowed on " "bootable datasets")); (void) zfs_error(hdl, EZFS_NOTSUP, errbuf); } else if (prop == ZFS_PROP_CHECKSUM || prop == ZFS_PROP_DEDUP) { (void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property setting is not allowed on " "root pools")); (void) zfs_error(hdl, EZFS_NOTSUP, errbuf); } else { (void) zfs_standard_error(hdl, err, errbuf); } break; case EINVAL: if (prop == ZPROP_INVAL) { (void) zfs_error(hdl, EZFS_BADPROP, errbuf); } else { (void) zfs_standard_error(hdl, err, errbuf); } break; case EACCES: if (prop == ZFS_PROP_KEYLOCATION) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "keylocation may only be set on encryption roots")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); } else { (void) zfs_standard_error(hdl, err, errbuf); } break; case EOVERFLOW: /* * This platform can't address a volume this big. */ #ifdef _ILP32 if (prop == ZFS_PROP_VOLSIZE) { (void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf); break; } #endif /* FALLTHROUGH */ default: (void) zfs_standard_error(hdl, err, errbuf); } } static boolean_t zfs_is_namespace_prop(zfs_prop_t prop) { switch (prop) { case ZFS_PROP_ATIME: case ZFS_PROP_RELATIME: case ZFS_PROP_DEVICES: case ZFS_PROP_EXEC: case ZFS_PROP_SETUID: case ZFS_PROP_READONLY: case ZFS_PROP_XATTR: case ZFS_PROP_NBMAND: return (B_TRUE); default: return (B_FALSE); } } /* * Given a property name and value, set the property for the given dataset. */ int zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval) { int ret = -1; char errbuf[1024]; libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *nvl = NULL; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), zhp->zfs_name); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 || nvlist_add_string(nvl, propname, propval) != 0) { (void) no_memory(hdl); goto error; } ret = zfs_prop_set_list(zhp, nvl); error: nvlist_free(nvl); return (ret); } /* * Given an nvlist of property names and values, set the properties for the * given dataset.
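 */

/*
 * A minimal usage sketch for zfs_prop_set_list() below, compiled out
 * with #if 0. The handle and the property values are hypothetical;
 * string values are parsed and validated by zfs_valid_proplist() as
 * shown above.
 */
#if 0
static int
example_set_props(zfs_handle_t *zhp)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	/* Roughly `zfs set compression=lz4 atime=off <dataset>`. */
	fnvlist_add_string(props,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), "lz4");
	fnvlist_add_string(props,
	    zfs_prop_to_name(ZFS_PROP_ATIME), "off");

	err = zfs_prop_set_list(zhp, props);
	fnvlist_free(props);
	return (err);
}
#endif

/*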
*/ int zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props) { zfs_cmd_t zc = {"\0"}; int ret = -1; prop_changelist_t **cls = NULL; int cl_idx; char errbuf[1024]; libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *nvl; int nvl_len = 0; int added_resv = 0; zfs_prop_t prop = 0; nvpair_t *elem; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), zhp->zfs_name); if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props, zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl, B_FALSE, errbuf)) == NULL) goto error; /* * We have to check for any extra properties which need to be added * before computing the length of the nvlist. */ for (elem = nvlist_next_nvpair(nvl, NULL); elem != NULL; elem = nvlist_next_nvpair(nvl, elem)) { if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE && (added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) { goto error; } } if (added_resv != 1 && (added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) { goto error; } /* * Check how many properties we're setting and allocate an array to * store changelist pointers for postfix(). */ for (elem = nvlist_next_nvpair(nvl, NULL); elem != NULL; elem = nvlist_next_nvpair(nvl, elem)) nvl_len++; if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL) goto error; cl_idx = 0; for (elem = nvlist_next_nvpair(nvl, NULL); elem != NULL; elem = nvlist_next_nvpair(nvl, elem)) { prop = zfs_name_to_prop(nvpair_name(elem)); assert(cl_idx < nvl_len); /* * We don't want to unmount & remount the dataset when changing * its canmount property to 'on' or 'noauto'. We only use * the changelist logic to unmount when setting canmount=off. */ if (prop != ZFS_PROP_CANMOUNT || (fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF && zfs_is_mounted(zhp, NULL))) { cls[cl_idx] = changelist_gather(zhp, prop, 0, 0); if (cls[cl_idx] == NULL) goto error; } if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cls[cl_idx])) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); ret = zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } if (cls[cl_idx] != NULL && (ret = changelist_prefix(cls[cl_idx])) != 0) goto error; cl_idx++; } assert(cl_idx == nvl_len); /* * Execute the corresponding ioctl() to set this list of properties. */ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if ((ret = zcmd_write_src_nvlist(hdl, &zc, nvl)) != 0 || (ret = zcmd_alloc_dst_nvlist(hdl, &zc, 0)) != 0) goto error; ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); if (ret != 0) { if (zc.zc_nvlist_dst_filled == B_FALSE) { (void) zfs_standard_error(hdl, errno, errbuf); goto error; } /* Get the list of unset properties back and report them. 
*/ nvlist_t *errorprops = NULL; if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0) goto error; for (nvpair_t *elem = nvlist_next_nvpair(errorprops, NULL); elem != NULL; elem = nvlist_next_nvpair(errorprops, elem)) { prop = zfs_name_to_prop(nvpair_name(elem)); zfs_setprop_error(hdl, prop, errno, errbuf); } nvlist_free(errorprops); if (added_resv && errno == ENOSPC) { /* clean up the volsize property we tried to set */ uint64_t old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); nvlist_free(nvl); nvl = NULL; zcmd_free_nvlists(&zc); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) goto error; if (nvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE), old_volsize) != 0) goto error; if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0) goto error; (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); } } else { for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) { if (cls[cl_idx] != NULL) { int clp_err = changelist_postfix(cls[cl_idx]); if (clp_err != 0) ret = clp_err; } } if (ret == 0) { /* * Refresh the statistics so the new property * value is reflected. */ (void) get_stats(zhp); /* * Remount the filesystem to propagate the change * if one of the options handled by the generic * Linux namespace layer has been modified. */ if (zfs_is_namespace_prop(prop) && zfs_is_mounted(zhp, NULL)) ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0); } } error: nvlist_free(nvl); zcmd_free_nvlists(&zc); if (cls != NULL) { for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) { if (cls[cl_idx] != NULL) changelist_free(cls[cl_idx]); } free(cls); } return (ret); } /* * Given a property, inherit the value from the parent dataset, or if received * is TRUE, revert to the received value, if any. */ int zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received) { zfs_cmd_t zc = {"\0"}; int ret; prop_changelist_t *cl; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; zfs_prop_t prop; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot inherit %s for '%s'"), propname, zhp->zfs_name); zc.zc_cookie = received; if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) { /* * For user properties, the amount of work we have to do is very * small, so just do it here. */ if (!zfs_prop_user(propname)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0) return (zfs_standard_error(hdl, errno, errbuf)); return (0); } /* * Verify that this property is inheritable. */ if (zfs_prop_readonly(prop)) return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf)); if (!zfs_prop_inheritable(prop) && !received) return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf)); /* * Check to see if the value applies to this type */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) return (zfs_error(hdl, EZFS_PROPTYPE, errbuf)); /* * Normalize the name, to get rid of shorthand abbreviations. */ propname = zfs_prop_to_name(prop); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID && zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is used in a non-global zone")); return (zfs_error(hdl, EZFS_ZONED, errbuf)); } /* * Determine datasets which will be affected by this change, if any. 
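 * (For a mountpoint change, for example, this gathers descendants whose
 * inherited mountpoints move with ours, so they can be unmounted before
 * the change and remounted afterwards.)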
*/ if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL) return (-1); if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); ret = zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } if ((ret = changelist_prefix(cl)) != 0) goto error; if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) { return (zfs_standard_error(hdl, errno, errbuf)); } else { if ((ret = changelist_postfix(cl)) != 0) goto error; /* * Refresh the statistics so the new property is reflected. */ (void) get_stats(zhp); /* * Remount the filesystem to propagate the change * if one of the options handled by the generic * Linux namespace layer has been modified. */ if (zfs_is_namespace_prop(prop) && zfs_is_mounted(zhp, NULL)) ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0); } error: changelist_free(cl); return (ret); } /* * True DSL properties are stored in an nvlist. The following two functions * extract them appropriately. */ uint64_t getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source) { nvlist_t *nv; uint64_t value; *source = NULL; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(prop), &nv) == 0) { verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); } else { verify(!zhp->zfs_props_table || zhp->zfs_props_table[prop] == B_TRUE); value = zfs_prop_default_numeric(prop); *source = ""; } return (value); } static const char * getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source) { nvlist_t *nv; const char *value; *source = NULL; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(prop), &nv) == 0) { value = fnvlist_lookup_string(nv, ZPROP_VALUE); (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); } else { verify(!zhp->zfs_props_table || zhp->zfs_props_table[prop] == B_TRUE); value = zfs_prop_default_string(prop); *source = ""; } return (value); } static boolean_t zfs_is_recvd_props_mode(zfs_handle_t *zhp) { return (zhp->zfs_props == zhp->zfs_recvd_props); } static void zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie) { *cookie = (uint64_t)(uintptr_t)zhp->zfs_props; zhp->zfs_props = zhp->zfs_recvd_props; } static void zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie) { zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie; *cookie = 0; } /* * Internal function for getting a numeric property. Both zfs_prop_get() and * zfs_prop_get_int() are built using this interface. * * Certain properties can be overridden using 'mount -o'. In this case, scan * the contents of the /proc/self/mounts entry, searching for the * appropriate options. If they differ from the on-disk values, report the * current values and mark the source "temporary". */ static int get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src, char **source, uint64_t *val) { zfs_cmd_t zc = {"\0"}; nvlist_t *zplprops = NULL; struct mnttab mnt; char *mntopt_on = NULL; char *mntopt_off = NULL; boolean_t received = zfs_is_recvd_props_mode(zhp); *source = NULL; /* * If the property is being fetched for a snapshot, check whether * the property is valid for the snapshot's head dataset type. 
*/ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT && !zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) { *val = zfs_prop_default_numeric(prop); return (-1); } switch (prop) { case ZFS_PROP_ATIME: mntopt_on = MNTOPT_ATIME; mntopt_off = MNTOPT_NOATIME; break; case ZFS_PROP_RELATIME: mntopt_on = MNTOPT_RELATIME; mntopt_off = MNTOPT_NORELATIME; break; case ZFS_PROP_DEVICES: mntopt_on = MNTOPT_DEVICES; mntopt_off = MNTOPT_NODEVICES; break; case ZFS_PROP_EXEC: mntopt_on = MNTOPT_EXEC; mntopt_off = MNTOPT_NOEXEC; break; case ZFS_PROP_READONLY: mntopt_on = MNTOPT_RO; mntopt_off = MNTOPT_RW; break; case ZFS_PROP_SETUID: mntopt_on = MNTOPT_SETUID; mntopt_off = MNTOPT_NOSETUID; break; case ZFS_PROP_XATTR: mntopt_on = MNTOPT_XATTR; mntopt_off = MNTOPT_NOXATTR; break; case ZFS_PROP_NBMAND: mntopt_on = MNTOPT_NBMAND; mntopt_off = MNTOPT_NONBMAND; break; default: break; } /* * Because looking up the mount options is potentially expensive * (iterating over all of /proc/self/mounts), we defer its * calculation until we're looking up a property which requires * its presence. */ if (!zhp->zfs_mntcheck && (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) { libzfs_handle_t *hdl = zhp->zfs_hdl; struct mnttab entry; if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) { zhp->zfs_mntopts = zfs_strdup(hdl, entry.mnt_mntopts); if (zhp->zfs_mntopts == NULL) return (-1); } zhp->zfs_mntcheck = B_TRUE; } if (zhp->zfs_mntopts == NULL) mnt.mnt_mntopts = ""; else mnt.mnt_mntopts = zhp->zfs_mntopts; switch (prop) { case ZFS_PROP_ATIME: case ZFS_PROP_RELATIME: case ZFS_PROP_DEVICES: case ZFS_PROP_EXEC: case ZFS_PROP_READONLY: case ZFS_PROP_SETUID: case ZFS_PROP_XATTR: case ZFS_PROP_NBMAND: *val = getprop_uint64(zhp, prop, source); if (received) break; if (hasmntopt(&mnt, mntopt_on) && !*val) { *val = B_TRUE; if (src) *src = ZPROP_SRC_TEMPORARY; } else if (hasmntopt(&mnt, mntopt_off) && *val) { *val = B_FALSE; if (src) *src = ZPROP_SRC_TEMPORARY; } break; case ZFS_PROP_CANMOUNT: case ZFS_PROP_VOLSIZE: case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: case ZFS_PROP_FILESYSTEM_LIMIT: case ZFS_PROP_SNAPSHOT_LIMIT: case ZFS_PROP_FILESYSTEM_COUNT: case ZFS_PROP_SNAPSHOT_COUNT: *val = getprop_uint64(zhp, prop, source); if (*source == NULL) { /* not default, must be local */ *source = zhp->zfs_name; } break; case ZFS_PROP_MOUNTED: *val = (zhp->zfs_mntopts != NULL); break; case ZFS_PROP_NUMCLONES: *val = zhp->zfs_dmustats.dds_num_clones; break; case ZFS_PROP_VERSION: case ZFS_PROP_NORMALIZE: case ZFS_PROP_UTF8ONLY: case ZFS_PROP_CASE: if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) return (-1); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) { zcmd_free_nvlists(&zc); if (prop == ZFS_PROP_VERSION && zhp->zfs_type == ZFS_TYPE_VOLUME) *val = zfs_prop_default_numeric(prop); return (-1); } if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 || nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop), val) != 0) { zcmd_free_nvlists(&zc); return (-1); } nvlist_free(zplprops); zcmd_free_nvlists(&zc); break; case ZFS_PROP_INCONSISTENT: *val = zhp->zfs_dmustats.dds_inconsistent; break; default: switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: case PROP_TYPE_INDEX: *val = getprop_uint64(zhp, prop, source); /* * If we tried to use a default value for a * readonly property, it means that it was not * present. 
Note this only applies to "truly" * readonly properties, not set-once properties * like volblocksize. */ if (zfs_prop_readonly(prop) && !zfs_prop_setonce(prop) && *source != NULL && (*source)[0] == '\0') { *source = NULL; return (-1); } break; case PROP_TYPE_STRING: default: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "cannot get non-numeric property")); return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, "internal error"))); } } return (0); } /* * Calculate the source type, given the raw source string. */ static void get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source, char *statbuf, size_t statlen) { if (statbuf == NULL || srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) { return; } if (source == NULL) { *srctype = ZPROP_SRC_NONE; } else if (source[0] == '\0') { *srctype = ZPROP_SRC_DEFAULT; } else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) { *srctype = ZPROP_SRC_RECEIVED; } else { if (strcmp(source, zhp->zfs_name) == 0) { *srctype = ZPROP_SRC_LOCAL; } else { (void) strlcpy(statbuf, source, statlen); *srctype = ZPROP_SRC_INHERITED; } } } int zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf, size_t proplen, boolean_t literal) { zfs_prop_t prop; int err = 0; if (zhp->zfs_recvd_props == NULL) if (get_recvd_props_ioctl(zhp) != 0) return (-1); prop = zfs_name_to_prop(propname); if (prop != ZPROP_INVAL) { uint64_t cookie; if (!nvlist_exists(zhp->zfs_recvd_props, propname)) return (-1); zfs_set_recvd_props_mode(zhp, &cookie); err = zfs_prop_get(zhp, prop, propbuf, proplen, NULL, NULL, 0, literal); zfs_unset_recvd_props_mode(zhp, &cookie); } else { nvlist_t *propval; char *recvdval; if (nvlist_lookup_nvlist(zhp->zfs_recvd_props, propname, &propval) != 0) return (-1); verify(nvlist_lookup_string(propval, ZPROP_VALUE, &recvdval) == 0); (void) strlcpy(propbuf, recvdval, proplen); } return (err == 0 ? 0 : -1); } static int get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen) { nvlist_t *value; nvpair_t *pair; value = zfs_get_clones_nvl(zhp); if (value == NULL) return (-1); propbuf[0] = '\0'; for (pair = nvlist_next_nvpair(value, NULL); pair != NULL; pair = nvlist_next_nvpair(value, pair)) { if (propbuf[0] != '\0') (void) strlcat(propbuf, ",", proplen); (void) strlcat(propbuf, nvpair_name(pair), proplen); } return (0); } struct get_clones_arg { uint64_t numclones; nvlist_t *value; const char *origin; char buf[ZFS_MAX_DATASET_NAME_LEN]; }; int get_clones_cb(zfs_handle_t *zhp, void *arg) { struct get_clones_arg *gca = arg; if (gca->numclones == 0) { zfs_close(zhp); return (0); } if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf), NULL, NULL, 0, B_TRUE) != 0) goto out; if (strcmp(gca->buf, gca->origin) == 0) { fnvlist_add_boolean(gca->value, zfs_get_name(zhp)); gca->numclones--; } out: (void) zfs_iter_children(zhp, get_clones_cb, gca); zfs_close(zhp); return (0); } nvlist_t * zfs_get_clones_nvl(zfs_handle_t *zhp) { nvlist_t *nv, *value; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) { struct get_clones_arg gca; /* * if this is a snapshot, then the kernel wasn't able * to get the clones. Do it by slowly iterating. 
*/ if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) return (NULL); if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0) return (NULL); if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) { nvlist_free(nv); return (NULL); } gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES); gca.value = value; gca.origin = zhp->zfs_name; if (gca.numclones != 0) { zfs_handle_t *root; char pool[ZFS_MAX_DATASET_NAME_LEN]; char *cp = pool; /* get the pool name */ (void) strlcpy(pool, zhp->zfs_name, sizeof (pool)); (void) strsep(&cp, "/@"); root = zfs_open(zhp->zfs_hdl, pool, ZFS_TYPE_FILESYSTEM); if (root == NULL) { nvlist_free(nv); nvlist_free(value); return (NULL); } (void) get_clones_cb(root, &gca); } if (gca.numclones != 0 || nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 || nvlist_add_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) { nvlist_free(nv); nvlist_free(value); return (NULL); } nvlist_free(nv); nvlist_free(value); verify(0 == nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), &nv)); } verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0); return (value); } /* * Accepts a property and value and checks that the value * matches the one found by the channel program. If they are * not equal, print both of them. */ static void zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval, const char *strval) { if (!zhp->zfs_hdl->libzfs_prop_debug) return; int error; char *poolname = zhp->zpool_hdl->zpool_name; const char *prop_name = zfs_prop_to_name(prop); const char *program = "args = ...\n" "ds = args['dataset']\n" "prop = args['property']\n" "value, setpoint = zfs.get_prop(ds, prop)\n" "return {value=value, setpoint=setpoint}\n"; nvlist_t *outnvl; nvlist_t *retnvl; nvlist_t *argnvl = fnvlist_alloc(); fnvlist_add_string(argnvl, "dataset", zhp->zfs_name); fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop)); error = lzc_channel_program_nosync(poolname, program, 10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl); if (error == 0) { retnvl = fnvlist_lookup_nvlist(outnvl, "return"); if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) { int64_t ans; error = nvlist_lookup_int64(retnvl, "value", &ans); if (error != 0) { (void) fprintf(stderr, "%s: zcp check error: " "%u\n", prop_name, error); return; } if (ans != intval) { (void) fprintf(stderr, "%s: zfs found %llu, " "but zcp found %llu\n", prop_name, (u_longlong_t)intval, (u_longlong_t)ans); } } else { char *str_ans; error = nvlist_lookup_string(retnvl, "value", &str_ans); if (error != 0) { (void) fprintf(stderr, "%s: zcp check error: " "%u\n", prop_name, error); return; } if (strcmp(strval, str_ans) != 0) { (void) fprintf(stderr, "%s: zfs found '%s', but zcp found '%s'\n", prop_name, strval, str_ans); } } } else { (void) fprintf(stderr, "%s: zcp check failed, channel program " "error: %u\n", prop_name, error); } nvlist_free(argnvl); nvlist_free(outnvl); } /* * Retrieve a property from the given object. If 'literal' is specified, then * numbers are left as exact values. Otherwise, numbers are converted to a * human-readable form. * * Returns 0 on success, or -1 on error. 
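 */

/*
 * A minimal sketch of the 'literal' flag's effect on zfs_prop_get()
 * below, compiled out with #if 0. For a 2^30-byte 'used' value the
 * literal form is "1073741824" while the human-readable form is "1G".
 */
#if 0
static void
example_get_used(zfs_handle_t *zhp)
{
	char literal[64], pretty[64];

	if (zfs_prop_get(zhp, ZFS_PROP_USED, literal, sizeof (literal),
	    NULL, NULL, 0, B_TRUE) == 0 &&
	    zfs_prop_get(zhp, ZFS_PROP_USED, pretty, sizeof (pretty),
	    NULL, NULL, 0, B_FALSE) == 0)
		(void) printf("used: %s bytes (%s)\n", literal, pretty);
}
#endif

/*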
*/ int zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen, zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal) { char *source = NULL; uint64_t val; const char *str; const char *strval; boolean_t received = zfs_is_recvd_props_mode(zhp); /* * Check to see if this property applies to our object */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) return (-1); if (received && zfs_prop_readonly(prop)) return (-1); if (src) *src = ZPROP_SRC_NONE; switch (prop) { case ZFS_PROP_CREATION: /* * 'creation' is a time_t stored in the statistics. We convert * this into a string unless 'literal' is specified. */ { val = getprop_uint64(zhp, prop, &source); time_t time = (time_t)val; struct tm t; if (literal || localtime_r(&time, &t) == NULL || strftime(propbuf, proplen, "%a %b %e %k:%M %Y", &t) == 0) (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); } zcp_check(zhp, prop, val, NULL); break; case ZFS_PROP_MOUNTPOINT: /* * Getting the precise mountpoint can be tricky. * * - for 'none' or 'legacy', return those values. * - for inherited mountpoints, we want to take everything * after our ancestor and append it to the inherited value. * * If the pool has an alternate root, we want to prepend that * root to any values we return. */ str = getprop_string(zhp, prop, &source); if (str[0] == '/') { char buf[MAXPATHLEN]; char *root = buf; const char *relpath; /* * If we inherit the mountpoint, even from a dataset * with a received value, the source will be the path of * the dataset we inherit from. If source is * ZPROP_SOURCE_VAL_RECVD, the received value is not * inherited. */ if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) { relpath = ""; } else { relpath = zhp->zfs_name + strlen(source); if (relpath[0] == '/') relpath++; } if ((zpool_get_prop(zhp->zpool_hdl, ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL, B_FALSE)) || (strcmp(root, "-") == 0)) root[0] = '\0'; /* * Special case an alternate root of '/'. This will * avoid having multiple leading slashes in the * mountpoint path. */ if (strcmp(root, "/") == 0) root++; /* * If the mountpoint is '/' then skip over this * if we are obtaining either an alternate root or * an inherited mountpoint. */ if (str[1] == '\0' && (root[0] != '\0' || relpath[0] != '\0')) str++; if (relpath[0] == '\0') (void) snprintf(propbuf, proplen, "%s%s", root, str); else (void) snprintf(propbuf, proplen, "%s%s%s%s", root, str, relpath[0] == '@' ? "" : "/", relpath); } else { /* 'legacy' or 'none' */ (void) strlcpy(propbuf, str, proplen); } zcp_check(zhp, prop, 0, propbuf); break; case ZFS_PROP_ORIGIN: str = getprop_string(zhp, prop, &source); if (str == NULL) return (-1); (void) strlcpy(propbuf, str, proplen); zcp_check(zhp, prop, 0, str); break; case ZFS_PROP_CLONES: if (get_clones_string(zhp, propbuf, proplen) != 0) return (-1); break; case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); /* * If quota or reservation is 0, we translate this into 'none' * (unless literal is set), and indicate that it's the default * value. Otherwise, we print the number nicely and indicate * that it's set locally.
*/ if (val == 0) { if (literal) (void) strlcpy(propbuf, "0", proplen); else (void) strlcpy(propbuf, "none", proplen); } else { if (literal) (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); else zfs_nicebytes(val, propbuf, proplen); } zcp_check(zhp, prop, val, NULL); break; case ZFS_PROP_FILESYSTEM_LIMIT: case ZFS_PROP_SNAPSHOT_LIMIT: case ZFS_PROP_FILESYSTEM_COUNT: case ZFS_PROP_SNAPSHOT_COUNT: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); /* * If limit is UINT64_MAX, we translate this into 'none' (unless * literal is set), and indicate that it's the default value. * Otherwise, we print the number nicely and indicate that it's * set locally. */ if (literal) { (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); } else if (val == UINT64_MAX) { (void) strlcpy(propbuf, "none", proplen); } else { zfs_nicenum(val, propbuf, proplen); } zcp_check(zhp, prop, val, NULL); break; case ZFS_PROP_REFRATIO: case ZFS_PROP_COMPRESSRATIO: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); if (literal) (void) snprintf(propbuf, proplen, "%llu.%02llu", (u_longlong_t)(val / 100), (u_longlong_t)(val % 100)); else (void) snprintf(propbuf, proplen, "%llu.%02llux", (u_longlong_t)(val / 100), (u_longlong_t)(val % 100)); zcp_check(zhp, prop, val, NULL); break; case ZFS_PROP_TYPE: switch (zhp->zfs_type) { case ZFS_TYPE_FILESYSTEM: str = "filesystem"; break; case ZFS_TYPE_VOLUME: str = "volume"; break; case ZFS_TYPE_SNAPSHOT: str = "snapshot"; break; case ZFS_TYPE_BOOKMARK: str = "bookmark"; break; default: abort(); } (void) snprintf(propbuf, proplen, "%s", str); zcp_check(zhp, prop, 0, propbuf); break; case ZFS_PROP_MOUNTED: /* * The 'mounted' property is a pseudo-property that describes * whether the filesystem is currently mounted. Even though * it's a boolean value, the typical values of "on" and "off" * don't make sense, so we translate to "yes" and "no". */ if (get_numeric_property(zhp, ZFS_PROP_MOUNTED, src, &source, &val) != 0) return (-1); if (val) (void) strlcpy(propbuf, "yes", proplen); else (void) strlcpy(propbuf, "no", proplen); break; case ZFS_PROP_NAME: /* * The 'name' property is a pseudo-property derived from the * dataset name. It is presented as a real property to simplify * consumers. */ (void) strlcpy(propbuf, zhp->zfs_name, proplen); zcp_check(zhp, prop, 0, propbuf); break; case ZFS_PROP_MLSLABEL: { #ifdef HAVE_MLSLABEL m_label_t *new_sl = NULL; char *ascii = NULL; /* human readable label */ (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), proplen); if (literal || (strcasecmp(propbuf, ZFS_MLSLABEL_DEFAULT) == 0)) break; /* * Try to translate the internal hex string to * human-readable output. If there are any * problems just use the hex string. */ if (str_to_label(propbuf, &new_sl, MAC_LABEL, L_NO_CORRECTION, NULL) == -1) { m_label_free(new_sl); break; } if (label_to_str(new_sl, &ascii, M_LABEL, DEF_NAMES) != 0) { if (ascii) free(ascii); m_label_free(new_sl); break; } m_label_free(new_sl); (void) strlcpy(propbuf, ascii, proplen); free(ascii); #else (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), proplen); #endif /* HAVE_MLSLABEL */ } break; case ZFS_PROP_GUID: case ZFS_PROP_CREATETXG: /* * GUIDs are stored as numbers, but they are identifiers. * We don't want them to be pretty printed, because pretty * printing mangles the ID into a truncated and useless value.
*/ if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); zcp_check(zhp, prop, val, NULL); break; case ZFS_PROP_REFERENCED: case ZFS_PROP_AVAILABLE: case ZFS_PROP_USED: case ZFS_PROP_USEDSNAP: case ZFS_PROP_USEDDS: case ZFS_PROP_USEDREFRESERV: case ZFS_PROP_USEDCHILD: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); if (literal) { (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); } else { zfs_nicebytes(val, propbuf, proplen); } zcp_check(zhp, prop, val, NULL); break; default: switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: case PROP_TYPE_INDEX: *val = getprop_uint64(zhp, prop, source); break; case PROP_TYPE_NUMBER: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) { return (-1); } if (literal) { (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); } else { zfs_nicenum(val, propbuf, proplen); } zcp_check(zhp, prop, val, NULL); break; case PROP_TYPE_STRING: str = getprop_string(zhp, prop, &source); if (str == NULL) return (-1); (void) strlcpy(propbuf, str, proplen); zcp_check(zhp, prop, 0, str); break; case PROP_TYPE_INDEX: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); if (zfs_prop_index_to_string(prop, val, &strval) != 0) return (-1); (void) strlcpy(propbuf, strval, proplen); zcp_check(zhp, prop, 0, strval); break; default: abort(); } } get_source(zhp, src, source, statbuf, statlen); return (0); } /* * Utility function to get the given numeric property. Does no validation that * the given property is the appropriate type; should only be used with * hard-coded property types. */ uint64_t zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop) { char *source; uint64_t val = 0; (void) get_numeric_property(zhp, prop, NULL, &source, &val); return (val); } int zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val) { char buf[64]; (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)val); return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf)); } /* * Similar to zfs_prop_get(), but returns the value as an integer.
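 */

/*
 * A minimal sketch for zfs_prop_get_numeric() below, compiled out with
 * #if 0. On success, setpoint names the dataset the value comes from
 * when the source is ZPROP_SRC_INHERITED.
 */
#if 0
static void
example_get_quota(zfs_handle_t *zhp)
{
	uint64_t quota;
	zprop_source_t src;
	char setpoint[ZFS_MAX_DATASET_NAME_LEN];

	if (zfs_prop_get_numeric(zhp, ZFS_PROP_QUOTA, &quota, &src,
	    setpoint, sizeof (setpoint)) == 0)
		(void) printf("quota=%llu src=%d\n",
		    (u_longlong_t)quota, (int)src);
}
#endif

/*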
*/ int zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value, zprop_source_t *src, char *statbuf, size_t statlen) { char *source; /* * Check to see if this property applies to our object */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) { return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE, dgettext(TEXT_DOMAIN, "cannot get property '%s'"), zfs_prop_to_name(prop))); } if (src) *src = ZPROP_SRC_NONE; if (get_numeric_property(zhp, prop, src, &source, value) != 0) return (-1); get_source(zhp, src, source, statbuf, statlen); return (0); } #ifdef HAVE_IDMAP static int idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser, char **domainp, idmap_rid_t *ridp) { idmap_get_handle_t *get_hdl = NULL; idmap_stat status; int err = EINVAL; if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS) goto out; if (isuser) { err = idmap_get_sidbyuid(get_hdl, id, IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status); } else { err = idmap_get_sidbygid(get_hdl, id, IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status); } if (err == IDMAP_SUCCESS && idmap_get_mappings(get_hdl) == IDMAP_SUCCESS && status == IDMAP_SUCCESS) err = 0; else err = EINVAL; out: if (get_hdl) idmap_get_destroy(get_hdl); return (err); } #endif /* HAVE_IDMAP */ /* * convert the propname into parameters needed by kernel * Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829 * Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789 * Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234 * Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234 * Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123 * Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789 */ static int userquota_propname_decode(const char *propname, boolean_t zoned, zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp) { zfs_userquota_prop_t type; char *cp; boolean_t isuser; boolean_t isgroup; boolean_t isproject; struct passwd *pw; struct group *gr; domain[0] = '\0'; /* Figure out the property type ({user|group|project}{quota|space}) */ for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) { if (strncmp(propname, zfs_userquota_prop_prefixes[type], strlen(zfs_userquota_prop_prefixes[type])) == 0) break; } if (type == ZFS_NUM_USERQUOTA_PROPS) return (EINVAL); *typep = type; isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED || type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_USEROBJUSED); isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED || type == ZFS_PROP_GROUPOBJQUOTA || type == ZFS_PROP_GROUPOBJUSED); isproject = (type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA || type == ZFS_PROP_PROJECTOBJUSED); cp = strchr(propname, '@') + 1; if (isuser && (pw = getpwnam(cp)) != NULL) { if (zoned && getzoneid() == GLOBAL_ZONEID) return (ENOENT); *ridp = pw->pw_uid; } else if (isgroup && (gr = getgrnam(cp)) != NULL) { if (zoned && getzoneid() == GLOBAL_ZONEID) return (ENOENT); *ridp = gr->gr_gid; } else if (!isproject && strchr(cp, '@')) { #ifdef HAVE_IDMAP /* * It's a SID name (eg "user@domain") that needs to be * turned into S-1-domainID-RID. 
*/ directory_error_t e; char *numericsid = NULL; char *end; if (zoned && getzoneid() == GLOBAL_ZONEID) return (ENOENT); if (isuser) { e = directory_sid_from_user_name(NULL, cp, &numericsid); } else { e = directory_sid_from_group_name(NULL, cp, &numericsid); } if (e != NULL) { directory_error_free(e); return (ENOENT); } if (numericsid == NULL) return (ENOENT); cp = numericsid; (void) strlcpy(domain, cp, domainlen); cp = strrchr(domain, '-'); *cp = '\0'; cp++; errno = 0; *ridp = strtoull(cp, &end, 10); free(numericsid); if (errno != 0 || *end != '\0') return (EINVAL); #else return (ENOSYS); #endif /* HAVE_IDMAP */ } else { /* It's a user/group/project ID (eg "12345"). */ uid_t id; char *end; id = strtoul(cp, &end, 10); if (*end != '\0') return (EINVAL); if (id > MAXUID && !isproject) { #ifdef HAVE_IDMAP /* It's an ephemeral ID. */ idmap_rid_t rid; char *mapdomain; if (idmap_id_to_numeric_domain_rid(id, isuser, &mapdomain, &rid) != 0) return (ENOENT); (void) strlcpy(domain, mapdomain, domainlen); *ridp = rid; #else return (ENOSYS); #endif /* HAVE_IDMAP */ } else { *ridp = id; } } return (0); } static int zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue, zfs_userquota_prop_t *typep) { int err; zfs_cmd_t zc = {"\0"}; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); err = userquota_propname_decode(propname, zfs_prop_get_int(zhp, ZFS_PROP_ZONED), typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid); zc.zc_objset_type = *typep; if (err) return (err); err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_USERSPACE_ONE, &zc); if (err) return (err); *propvalue = zc.zc_cookie; return (0); } int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue) { zfs_userquota_prop_t type; return (zfs_prop_get_userquota_common(zhp, propname, propvalue, &type)); } int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal) { int err; uint64_t propvalue; zfs_userquota_prop_t type; err = zfs_prop_get_userquota_common(zhp, propname, &propvalue, &type); if (err) return (err); if (literal) { (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)propvalue); } else if (propvalue == 0 && (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA || type == ZFS_PROP_PROJECTQUOTA || type == ZFS_PROP_PROJECTOBJQUOTA)) { (void) strlcpy(propbuf, "none", proplen); } else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED || type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) { zfs_nicebytes(propvalue, propbuf, proplen); } else { zfs_nicenum(propvalue, propbuf, proplen); } return (0); } int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue) { int err; zfs_cmd_t zc = {"\0"}; const char *snapname; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); snapname = strchr(propname, '@') + 1; if (strchr(snapname, '@')) { (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value)); } else { /* snapname is the short name, append it to zhp's fsname */ char *cp; (void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value)); cp = strchr(zc.zc_value, '@'); if (cp != NULL) *cp = '\0'; (void) strlcat(zc.zc_value, "@", sizeof (zc.zc_value)); (void) strlcat(zc.zc_value, snapname, sizeof (zc.zc_value)); } err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_WRITTEN, &zc); if (err) return (err); *propvalue = 
zc.zc_cookie; return (0); } int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal) { int err; uint64_t propvalue; err = zfs_prop_get_written_int(zhp, propname, &propvalue); if (err) return (err); if (literal) { (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)propvalue); } else { zfs_nicebytes(propvalue, propbuf, proplen); } return (0); } /* * Returns the name of the given zfs handle. */ const char * zfs_get_name(const zfs_handle_t *zhp) { return (zhp->zfs_name); } /* * Returns the name of the parent pool for the given zfs handle. */ const char * zfs_get_pool_name(const zfs_handle_t *zhp) { return (zhp->zpool_hdl->zpool_name); } /* * Returns the type of the given zfs handle. */ zfs_type_t zfs_get_type(const zfs_handle_t *zhp) { return (zhp->zfs_type); } /* * Is one dataset name a child dataset of another? * * Needs to handle these cases: * Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo" * Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar" * Descendant? No. No. No. Yes. */ static boolean_t is_descendant(const char *ds1, const char *ds2) { size_t d1len = strlen(ds1); /* ds2 can't be a descendant if it's smaller */ if (strlen(ds2) < d1len) return (B_FALSE); /* otherwise, compare strings and verify that there's a '/' char */ return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0)); } /* * Given a complete name, return just the portion that refers to the parent. * Will return -1 if there is no parent (path is just the name of the * pool). */ static int parent_name(const char *path, char *buf, size_t buflen) { char *slashp; (void) strlcpy(buf, path, buflen); if ((slashp = strrchr(buf, '/')) == NULL) return (-1); *slashp = '\0'; return (0); } int zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen) { return (parent_name(zfs_get_name(zhp), buf, buflen)); } /* * If accept_ancestor is false, then check to make sure that the given path has * a parent, and that it exists. If accept_ancestor is true, then find the * closest existing ancestor for the given path. In prefixlen return the * length of already existing prefix of the given path. We also fetch the * 'zoned' property, which is used to validate property settings when creating * new datasets. */ static int check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned, boolean_t accept_ancestor, int *prefixlen) { zfs_cmd_t zc = {"\0"}; char parent[ZFS_MAX_DATASET_NAME_LEN]; char *slash; zfs_handle_t *zhp; char errbuf[1024]; uint64_t is_zoned; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), path); /* get parent, and check to see if this is just a pool */ if (parent_name(path, parent, sizeof (parent)) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing dataset name")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } /* check to see if the pool exists */ if ((slash = strchr(parent, '/')) == NULL) slash = parent + strlen(parent); (void) strncpy(zc.zc_name, parent, slash - parent); zc.zc_name[slash - parent] = '\0'; if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 && errno == ENOENT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool '%s'"), zc.zc_name); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } /* check to see if the parent dataset exists */ while ((zhp = make_dataset_handle(hdl, parent)) == NULL) { if (errno == ENOENT && accept_ancestor) { /* * Go deeper to find an ancestor, give up on top level. 
*/ if (parent_name(parent, parent, sizeof (parent)) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool '%s'"), zc.zc_name); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } } else if (errno == ENOENT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent does not exist")); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } else return (zfs_standard_error(hdl, errno, errbuf)); } is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); if (zoned != NULL) *zoned = is_zoned; /* we are in a non-global zone, but parent is in the global zone */ if (getzoneid() != GLOBAL_ZONEID && !is_zoned) { (void) zfs_standard_error(hdl, EPERM, errbuf); zfs_close(zhp); return (-1); } /* make sure parent is a filesystem */ if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent is not a filesystem")); (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); zfs_close(zhp); return (-1); } zfs_close(zhp); if (prefixlen != NULL) *prefixlen = strlen(parent); return (0); } /* * Finds whether the dataset of the given type(s) exists. */ boolean_t zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types) { zfs_handle_t *zhp; if (!zfs_validate_name(hdl, path, types, B_FALSE)) return (B_FALSE); /* * Try to get stats for the dataset, which will tell us if it exists. */ if ((zhp = make_dataset_handle(hdl, path)) != NULL) { int ds_type = zhp->zfs_type; zfs_close(zhp); if (types & ds_type) return (B_TRUE); } return (B_FALSE); } /* * Given a path to 'target', create all the ancestors between * the prefixlen portion of the path, and the target itself. * Fail if the initial prefixlen-ancestor does not already exist. */ int create_parents(libzfs_handle_t *hdl, char *target, int prefixlen) { zfs_handle_t *h; char *cp; const char *opname; /* make sure prefix exists */ cp = target + prefixlen; if (*cp != '/') { assert(strchr(cp, '/') == NULL); h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); } else { *cp = '\0'; h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); *cp = '/'; } if (h == NULL) return (-1); zfs_close(h); /* * Attempt to create, mount, and share any ancestor filesystems, * up to the prefixlen-long one. */ for (cp = target + prefixlen + 1; (cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) { *cp = '\0'; h = make_dataset_handle(hdl, target); if (h) { /* it already exists, nothing to do here */ zfs_close(h); continue; } if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM, NULL) != 0) { opname = dgettext(TEXT_DOMAIN, "create"); goto ancestorerr; } h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); if (h == NULL) { opname = dgettext(TEXT_DOMAIN, "open"); goto ancestorerr; } if (zfs_mount(h, NULL, 0) != 0) { opname = dgettext(TEXT_DOMAIN, "mount"); goto ancestorerr; } if (zfs_share(h) != 0) { opname = dgettext(TEXT_DOMAIN, "share"); goto ancestorerr; } zfs_close(h); } return (0); ancestorerr: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to %s ancestor '%s'"), opname, target); return (-1); } /* * Creates non-existing ancestors of the given path. */ int zfs_create_ancestors(libzfs_handle_t *hdl, const char *path) { int prefix; char *path_copy; char errbuf[1024]; int rc = 0; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), path); /* * Check that we are not passing the nesting limit * before we start creating any ancestors. 
*/ if (dataset_nestcheck(path) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "maximum name nesting depth exceeded")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0) return (-1); if ((path_copy = strdup(path)) != NULL) { rc = create_parents(hdl, path_copy, prefix); free(path_copy); } if (path_copy == NULL || rc != 0) return (-1); return (0); } /* * Create a new filesystem or volume. */ int zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type, nvlist_t *props) { int ret; uint64_t size = 0; uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); uint64_t zoned; enum lzc_dataset_type ost; zpool_handle_t *zpool_handle; uint8_t *wkeydata = NULL; uint_t wkeylen = 0; char errbuf[1024]; char parent[ZFS_MAX_DATASET_NAME_LEN]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), path); /* validate the path, taking care to note the extended error message */ if (!zfs_validate_name(hdl, path, type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); if (dataset_nestcheck(path) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "maximum name nesting depth exceeded")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } /* validate parents exist */ if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0) return (-1); /* * The failure modes when creating a dataset of a different type over * one that already exists are a little strange. In particular, if you * try to create a dataset on top of an existing dataset, the ioctl() * will return ENOENT, not EEXIST. To prevent this from happening, we * first try to see if the dataset exists. */ if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset already exists")); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); } if (type == ZFS_TYPE_VOLUME) ost = LZC_DATSET_TYPE_ZVOL; else ost = LZC_DATSET_TYPE_ZFS; /* open zpool handle for prop validation */ char pool_path[ZFS_MAX_DATASET_NAME_LEN]; (void) strlcpy(pool_path, path, sizeof (pool_path)); /* truncate pool_path at first slash */ char *p = strchr(pool_path, '/'); if (p != NULL) *p = '\0'; if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL) return (-1); if (props && (props = zfs_valid_proplist(hdl, type, props, zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) { zpool_close(zpool_handle); return (-1); } zpool_close(zpool_handle); if (type == ZFS_TYPE_VOLUME) { /* * If we are creating a volume, the size and block size must * satisfy a few constraints. First, the blocksize must be a * valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the * volsize must be a multiple of the block size, and cannot be * zero.
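 * For example, with an 8K volblocksize a 1G volsize is accepted
 * (1073741824 is a multiple of 8192), while a volsize of 1000000
 * bytes would be rejected.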
*/ if (props == NULL || nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing volume size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } if ((ret = nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &blocksize)) != 0) { if (ret == ENOENT) { blocksize = zfs_prop_default_numeric( ZFS_PROP_VOLBLOCKSIZE); } else { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing volume block size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } } if (size == 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "volume size cannot be zero")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } if (size % blocksize != 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "volume size must be a multiple of volume block " "size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } } (void) parent_name(path, parent, sizeof (parent)); if (zfs_crypto_create(hdl, parent, props, NULL, B_TRUE, &wkeydata, &wkeylen) != 0) { nvlist_free(props); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); } /* create the dataset */ ret = lzc_create(path, ost, props, wkeydata, wkeylen); nvlist_free(props); if (wkeydata != NULL) free(wkeydata); /* check for failure */ if (ret != 0) { switch (errno) { case ENOENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such parent '%s'"), parent); return (zfs_error(hdl, EZFS_NOENT, errbuf)); - case EINVAL: - zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, - "parent '%s' is not a filesystem"), parent); - return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); - case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to set this " "property or value")); return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); case EACCES: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "encryption root's key is not loaded " "or provided")); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); case ERANGE: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property value(s) specified")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); #ifdef _ILP32 case EOVERFLOW: /* * This platform can't address a volume this big. */ if (type == ZFS_TYPE_VOLUME) return (zfs_error(hdl, EZFS_VOLTOOBIG, errbuf)); #endif /* FALLTHROUGH */ default: return (zfs_standard_error(hdl, errno, errbuf)); } } return (0); } /* * Destroys the given dataset. The caller must make sure that the filesystem * isn't mounted, and that there are no active dependents. If the file system * does not exist this function does nothing. 
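 */

/*
 * A minimal usage sketch for zfs_create() above, compiled out with
 * #if 0. The pool/dataset name is hypothetical; a volume must carry a
 * volsize property, while volblocksize falls back to its default.
 */
#if 0
static int
example_create_volume(libzfs_handle_t *hdl)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	/* 1G volume; must be a non-zero multiple of the block size. */
	fnvlist_add_uint64(props,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), 1ULL << 30);

	err = zfs_create(hdl, "tank/vol0", ZFS_TYPE_VOLUME, props);
	fnvlist_free(props);
	return (err);
}
#endif

/*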
*/ int zfs_destroy(zfs_handle_t *zhp, boolean_t defer) { int error; if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT && defer) return (EINVAL); if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) { nvlist_t *nv = fnvlist_alloc(); fnvlist_add_boolean(nv, zhp->zfs_name); error = lzc_destroy_bookmarks(nv, NULL); fnvlist_free(nv); if (error != 0) { return (zfs_standard_error_fmt(zhp->zfs_hdl, error, dgettext(TEXT_DOMAIN, "cannot destroy '%s'"), zhp->zfs_name)); } return (0); } if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { nvlist_t *nv = fnvlist_alloc(); fnvlist_add_boolean(nv, zhp->zfs_name); error = lzc_destroy_snaps(nv, defer, NULL); fnvlist_free(nv); } else { error = lzc_destroy(zhp->zfs_name); } if (error != 0 && error != ENOENT) { return (zfs_standard_error_fmt(zhp->zfs_hdl, errno, dgettext(TEXT_DOMAIN, "cannot destroy '%s'"), zhp->zfs_name)); } remove_mountpoint(zhp); return (0); } struct destroydata { nvlist_t *nvl; const char *snapname; }; static int zfs_check_snap_cb(zfs_handle_t *zhp, void *arg) { struct destroydata *dd = arg; char name[ZFS_MAX_DATASET_NAME_LEN]; int rv = 0; if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name, dd->snapname) >= sizeof (name)) return (EINVAL); if (lzc_exists(name)) verify(nvlist_add_boolean(dd->nvl, name) == 0); rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd); zfs_close(zhp); return (rv); } /* * Destroys all snapshots with the given name in zhp & descendants. */ int zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer) { int ret; struct destroydata dd = { 0 }; dd.snapname = snapname; verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0); (void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd); if (nvlist_empty(dd.nvl)) { ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT, dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"), zhp->zfs_name, snapname); } else { ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer); } nvlist_free(dd.nvl); return (ret); } /* * Destroys all the snapshots named in the nvlist. */ int zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer) { int ret; nvlist_t *errlist = NULL; nvpair_t *pair; ret = lzc_destroy_snaps(snaps, defer, &errlist); if (ret == 0) { nvlist_free(errlist); return (0); } if (nvlist_empty(errlist)) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot destroy snapshots")); ret = zfs_standard_error(hdl, ret, errbuf); } for (pair = nvlist_next_nvpair(errlist, NULL); pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"), nvpair_name(pair)); switch (fnvpair_value_int32(pair)) { case EEXIST: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshot is cloned")); ret = zfs_error(hdl, EZFS_EXISTS, errbuf); break; default: ret = zfs_standard_error(hdl, errno, errbuf); break; } } nvlist_free(errlist); return (ret); } /* * Clones the given dataset. The target must be of the same type as the source. 
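 */

/*
 * A minimal usage sketch for zfs_clone() below, compiled out with
 * #if 0. The snapshot and clone names are hypothetical; the source
 * handle must be opened as ZFS_TYPE_SNAPSHOT.
 */
#if 0
static int
example_clone(libzfs_handle_t *hdl)
{
	zfs_handle_t *snap;
	int err;

	snap = zfs_open(hdl, "tank/home@monday", ZFS_TYPE_SNAPSHOT);
	if (snap == NULL)
		return (-1);

	err = zfs_clone(snap, "tank/home-clone", NULL);
	zfs_close(snap);
	return (err);
}
#endif

/*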
*/ int zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props) { char parent[ZFS_MAX_DATASET_NAME_LEN]; int ret; char errbuf[1024]; libzfs_handle_t *hdl = zhp->zfs_hdl; uint64_t zoned; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), target); /* validate the target/clone name */ if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* validate parents exist */ if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0) return (-1); (void) parent_name(target, parent, sizeof (parent)); /* do the clone */ if (props) { zfs_type_t type; if (ZFS_IS_VOLUME(zhp)) { type = ZFS_TYPE_VOLUME; } else { type = ZFS_TYPE_FILESYSTEM; } if ((props = zfs_valid_proplist(hdl, type, props, zoned, zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL) return (-1); if (zfs_fix_auto_resv(zhp, props) == -1) { nvlist_free(props); return (-1); } } if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) { nvlist_free(props); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); } ret = lzc_clone(target, zhp->zfs_name, props); nvlist_free(props); if (ret != 0) { switch (errno) { case ENOENT: /* * The parent doesn't exist. We should have caught this * above, but there may be a race condition that has since * destroyed the parent. * * At this point, we don't know whether it's the source * that doesn't exist anymore, or whether the target * dataset doesn't exist. */ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "no such parent '%s'"), parent); return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); case EXDEV: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "source and target pools differ")); return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET, errbuf)); default: return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); } } return (ret); } /* * Promotes the given clone fs to be the clone parent. */ int zfs_promote(zfs_handle_t *zhp) { libzfs_handle_t *hdl = zhp->zfs_hdl; char snapname[ZFS_MAX_DATASET_NAME_LEN]; int ret; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot promote '%s'"), zhp->zfs_name); if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshots can not be promoted")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } if (zhp->zfs_dmustats.dds_origin[0] == '\0') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not a cloned filesystem")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname)); if (ret != 0) { switch (ret) { case EEXIST: /* There is a conflicting snapshot name.
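 * Promotion moves every snapshot of the origin filesystem that is
 * older than (or is) the clone's origin snapshot over to the clone.
 * For instance (hypothetical names): "tank/clone" is a clone of
 * "tank/fs@base", and "tank/fs" also carries an older snapshot
 * "tank/fs@a". If "tank/clone@a" already exists, the promotion
 * cannot move "tank/fs@a" across, so lzc_promote() fails with
 * EEXIST and hands back the conflicting short name in snapname.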
*/ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "conflicting snapshot '%s' from parent '%s'"), snapname, zhp->zfs_dmustats.dds_origin); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); default: return (zfs_standard_error(hdl, ret, errbuf)); } } return (ret); } typedef struct snapdata { nvlist_t *sd_nvl; const char *sd_snapname; } snapdata_t; static int zfs_snapshot_cb(zfs_handle_t *zhp, void *arg) { snapdata_t *sd = arg; char name[ZFS_MAX_DATASET_NAME_LEN]; int rv = 0; if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) { if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp), sd->sd_snapname) >= sizeof (name)) return (EINVAL); fnvlist_add_boolean(sd->sd_nvl, name); rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd); } zfs_close(zhp); return (rv); } int zfs_remap_indirects(libzfs_handle_t *hdl, const char *fs) { int err; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot remap dataset '%s'"), fs); err = lzc_remap(fs); if (err != 0) { switch (err) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); break; default: (void) zfs_standard_error(hdl, err, errbuf); break; } } return (err); } /* * Creates snapshots. The keys in the snaps nvlist are the snapshots to be * created. */ int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props) { int ret; char errbuf[1024]; nvpair_t *elem; nvlist_t *errors; zpool_handle_t *zpool_hdl; char pool[ZFS_MAX_DATASET_NAME_LEN]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create snapshots ")); elem = NULL; while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) { const char *snapname = nvpair_name(elem); /* validate the target name */ if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT, B_TRUE)) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create snapshot '%s'"), snapname); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } } /* * get pool handle for prop validation. assumes all snaps are in the * same pool, as does lzc_snapshot (below). 
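 * The pool name is obtained by truncating the first snapshot's name
 * at the first '/' or '@' -- e.g. "tank/fs@today" and "tank@today"
 * both yield "tank" -- which is what the strcspn() call below does.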
*/ elem = nvlist_next_nvpair(snaps, NULL); (void) strlcpy(pool, nvpair_name(elem), sizeof (pool)); pool[strcspn(pool, "/@")] = '\0'; zpool_hdl = zpool_open(hdl, pool); if (zpool_hdl == NULL) return (-1); if (props != NULL && (props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT, props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) { zpool_close(zpool_hdl); return (-1); } zpool_close(zpool_hdl); ret = lzc_snapshot(snaps, props, &errors); if (ret != 0) { boolean_t printed = B_FALSE; for (elem = nvlist_next_nvpair(errors, NULL); elem != NULL; elem = nvlist_next_nvpair(errors, elem)) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create snapshot '%s'"), nvpair_name(elem)); (void) zfs_standard_error(hdl, fnvpair_value_int32(elem), errbuf); printed = B_TRUE; } if (!printed) { switch (ret) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "multiple snapshots of same " "fs not allowed")); (void) zfs_error(hdl, EZFS_EXISTS, errbuf); break; default: (void) zfs_standard_error(hdl, ret, errbuf); } } } nvlist_free(props); nvlist_free(errors); return (ret); } int zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive, nvlist_t *props) { int ret; snapdata_t sd = { 0 }; char fsname[ZFS_MAX_DATASET_NAME_LEN]; char *cp; zfs_handle_t *zhp; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot snapshot %s"), path); if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); (void) strlcpy(fsname, path, sizeof (fsname)); cp = strchr(fsname, '@'); *cp = '\0'; sd.sd_snapname = cp + 1; if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { return (-1); } verify(nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) == 0); if (recursive) { (void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd); } else { fnvlist_add_boolean(sd.sd_nvl, path); } ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props); nvlist_free(sd.sd_nvl); zfs_close(zhp); return (ret); } /* * Destroy any more recent snapshots. We invoke this callback on any dependents * of the snapshot first. If the 'cb_dependent' member is non-zero, then this * is a dependent and we should just destroy it without checking the transaction * group. */ typedef struct rollback_data { const char *cb_target; /* the snapshot */ uint64_t cb_create; /* creation time reference */ boolean_t cb_error; boolean_t cb_force; } rollback_data_t; static int rollback_destroy_dependent(zfs_handle_t *zhp, void *data) { rollback_data_t *cbp = data; prop_changelist_t *clp; /* We must destroy this clone; first unmount it */ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, cbp->cb_force ? MS_FORCE: 0); if (clp == NULL || changelist_prefix(clp) != 0) { cbp->cb_error = B_TRUE; zfs_close(zhp); return (0); } if (zfs_destroy(zhp, B_FALSE) != 0) cbp->cb_error = B_TRUE; else changelist_remove(clp, zhp->zfs_name); (void) changelist_postfix(clp); changelist_free(clp); zfs_close(zhp); return (0); } static int rollback_destroy(zfs_handle_t *zhp, void *data) { rollback_data_t *cbp = data; if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) { cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE, rollback_destroy_dependent, cbp); cbp->cb_error |= zfs_destroy(zhp, B_FALSE); } zfs_close(zhp); return (0); } /* * Given a dataset, rollback to a specific snapshot, discarding any * data changes since then and making it the active dataset. * * Any snapshots and bookmarks more recent than the target are * destroyed, along with their dependents (i.e. clones). 
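 *
 * A minimal calling sketch (hypothetical names, error handling
 * elided); the force flag is passed through to the unmount of any
 * clones that have to be destroyed along the way:
 *
 *	zfs_handle_t *fs = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
 *	zfs_handle_t *snap = zfs_open(hdl, "tank/fs@monday",
 *	    ZFS_TYPE_SNAPSHOT);
 *	if (fs != NULL && snap != NULL)
 *		(void) zfs_rollback(fs, snap, B_FALSE);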
*/ int zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force) { rollback_data_t cb = { 0 }; int err; boolean_t restore_resv = 0; uint64_t old_volsize = 0, new_volsize; zfs_prop_t resv_prop = { 0 }; assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM || zhp->zfs_type == ZFS_TYPE_VOLUME); /* * Destroy all recent snapshots and their dependents. */ cb.cb_force = force; cb.cb_target = snap->zfs_name; cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG); (void) zfs_iter_snapshots(zhp, B_FALSE, rollback_destroy, &cb); (void) zfs_iter_bookmarks(zhp, rollback_destroy, &cb); if (cb.cb_error) return (-1); /* * Now that we have verified that the snapshot is the latest, * rollback to the given snapshot. */ if (zhp->zfs_type == ZFS_TYPE_VOLUME) { if (zfs_which_resv_prop(zhp, &resv_prop) < 0) return (-1); old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); restore_resv = (old_volsize == zfs_prop_get_int(zhp, resv_prop)); } /* * Pass both the filesystem and the wanted snapshot names, * we would get an error back if the snapshot is destroyed or * a new snapshot is created before this request is processed. */ err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name); if (err != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rollback '%s'"), zhp->zfs_name); switch (err) { case EEXIST: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "there is a snapshot or bookmark more recent " "than '%s'"), snap->zfs_name); (void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf); break; case ESRCH: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "'%s' is not found among snapshots of '%s'"), snap->zfs_name, zhp->zfs_name); (void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf); break; case EINVAL: (void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf); break; default: (void) zfs_standard_error(zhp->zfs_hdl, err, errbuf); } return (err); } /* * For volumes, if the pre-rollback volsize matched the pre- * rollback reservation and the volsize has changed then set * the reservation property to the post-rollback volsize. * Make a new handle since the rollback closed the dataset. */ if ((zhp->zfs_type == ZFS_TYPE_VOLUME) && (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) { if (restore_resv) { new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); if (old_volsize != new_volsize) err = zfs_prop_set_int(zhp, resv_prop, new_volsize); } zfs_close(zhp); } return (err); } /* * Renames the given dataset. 
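 *
 * For snapshots the target may be abbreviated to the part after the
 * '@': renaming "tank/fs@a" to either "b" or "tank/fs@b" yields
 * "tank/fs@b" (hypothetical names); the code below reconstructs the
 * full name in that case. A recursive rename is only valid for
 * snapshots, where it renames the same-named snapshot on every
 * descendant dataset as well.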
*/ int zfs_rename(zfs_handle_t *zhp, const char *target, boolean_t recursive, boolean_t force_unmount) { int ret = 0; zfs_cmd_t zc = {"\0"}; char *delim; prop_changelist_t *cl = NULL; zfs_handle_t *zhrp = NULL; char *parentname = NULL; char parent[ZFS_MAX_DATASET_NAME_LEN]; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; /* if we have the same exact name, just return success */ if (strcmp(zhp->zfs_name, target) == 0) return (0); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename to '%s'"), target); /* make sure source name is valid */ if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* * Make sure the target name is valid */ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { if ((strchr(target, '@') == NULL) || *target == '@') { /* * Snapshot target name is abbreviated, * reconstruct full dataset name */ (void) strlcpy(parent, zhp->zfs_name, sizeof (parent)); delim = strchr(parent, '@'); if (strchr(target, '@') == NULL) *(++delim) = '\0'; else *delim = '\0'; (void) strlcat(parent, target, sizeof (parent)); target = parent; } else { /* * Make sure we're renaming within the same dataset. */ delim = strchr(target, '@'); if (strncmp(zhp->zfs_name, target, delim - target) != 0 || zhp->zfs_name[delim - target] != '@') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshots must be part of same " "dataset")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); } } if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } else { if (recursive) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "recursive rename must be a snapshot")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* validate parents */ if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0) return (-1); /* make sure we're in the same pool */ verify((delim = strchr(target, '/')) != NULL); if (strncmp(zhp->zfs_name, target, delim - target) != 0 || zhp->zfs_name[delim - target] != '/') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "datasets must be within same pool")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); } /* new name cannot be a child of the current dataset name */ if (is_descendant(zhp->zfs_name, target)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "New dataset name cannot be a descendant of " "current dataset name")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } } (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name); if (getzoneid() == GLOBAL_ZONEID && zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is used in a non-global zone")); return (zfs_error(hdl, EZFS_ZONED, errbuf)); } if (recursive) { parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name); if (parentname == NULL) { ret = -1; goto error; } delim = strchr(parentname, '@'); *delim = '\0'; zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET); if (zhrp == NULL) { ret = -1; goto error; } } else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) { if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, CL_GATHER_ITER_MOUNTED, force_unmount ? 
MS_FORCE : 0)) == NULL) return (-1); if (changelist_haszonedchild(cl)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); (void) zfs_error(hdl, EZFS_ZONED, errbuf); ret = -1; goto error; } if ((ret = changelist_prefix(cl)) != 0) goto error; } if (ZFS_IS_VOLUME(zhp)) zc.zc_objset_type = DMU_OST_ZVOL; else zc.zc_objset_type = DMU_OST_ZFS; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value)); zc.zc_cookie = recursive; if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) { /* * if it was recursive, the one that actually failed will * be in zc.zc_name */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zc.zc_name); if (recursive && errno == EEXIST) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "a child dataset already has a snapshot " "with the new name")); (void) zfs_error(hdl, EZFS_EXISTS, errbuf); } else if (errno == EACCES) { if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) == ZIO_CRYPT_OFF) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot rename an unencrypted dataset to " "be a descendant of an encrypted one")); } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot move encryption child outside of " "its encryption root")); } (void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf); } else { (void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf); } /* * On failure, we still want to remount any filesystems that * were previously mounted, so we don't alter the system state. */ if (cl != NULL) (void) changelist_postfix(cl); } else { if (cl != NULL) { changelist_rename(cl, zfs_get_name(zhp), target); ret = changelist_postfix(cl); } } error: if (parentname != NULL) { free(parentname); } if (zhrp != NULL) { zfs_close(zhrp); } if (cl != NULL) { changelist_free(cl); } return (ret); } nvlist_t * zfs_get_all_props(zfs_handle_t *zhp) { return (zhp->zfs_props); } nvlist_t * zfs_get_recvd_props(zfs_handle_t *zhp) { if (zhp->zfs_recvd_props == NULL) if (get_recvd_props_ioctl(zhp) != 0) return (NULL); return (zhp->zfs_recvd_props); } nvlist_t * zfs_get_user_props(zfs_handle_t *zhp) { return (zhp->zfs_user_props); } /* * This function is used by 'zfs list' to determine the exact set of columns to * display, and their maximum widths. This does two main things: * * - If this is a list of all properties, then expand the list to include * all native properties, and set a flag so that for each dataset we look * for new unique user properties and add them to the list. * * - For non-fixed-width properties, keep track of the maximum width seen * so that we can size the column appropriately. If the user has * requested received property values, we also need to compute the width * of the RECEIVED column. */ int zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received, boolean_t literal) { libzfs_handle_t *hdl = zhp->zfs_hdl; zprop_list_t *entry; zprop_list_t **last, **start; nvlist_t *userprops, *propval; nvpair_t *elem; char *strval; char buf[ZFS_MAXPROPLEN]; if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0) return (-1); userprops = zfs_get_user_props(zhp); entry = *plp; if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) { /* * Go through and add any user properties as necessary. We * start by incrementing our list pointer to the first * non-native property.
*/ start = plp; while (*start != NULL) { if ((*start)->pl_prop == ZPROP_INVAL) break; start = &(*start)->pl_next; } elem = NULL; while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) { /* * See if we've already found this property in our list. */ for (last = start; *last != NULL; last = &(*last)->pl_next) { if (strcmp((*last)->pl_user_prop, nvpair_name(elem)) == 0) break; } if (*last == NULL) { if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL || ((entry->pl_user_prop = zfs_strdup(hdl, nvpair_name(elem)))) == NULL) { free(entry); return (-1); } entry->pl_prop = ZPROP_INVAL; entry->pl_width = strlen(nvpair_name(elem)); entry->pl_all = B_TRUE; *last = entry; } } } /* * Now go through and check the width of any non-fixed columns */ for (entry = *plp; entry != NULL; entry = entry->pl_next) { if (entry->pl_fixed && !literal) continue; if (entry->pl_prop != ZPROP_INVAL) { if (zfs_prop_get(zhp, entry->pl_prop, buf, sizeof (buf), NULL, NULL, 0, literal) == 0) { if (strlen(buf) > entry->pl_width) entry->pl_width = strlen(buf); } if (received && zfs_prop_get_recvd(zhp, zfs_prop_to_name(entry->pl_prop), buf, sizeof (buf), literal) == 0) if (strlen(buf) > entry->pl_recvd_width) entry->pl_recvd_width = strlen(buf); } else { if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop, &propval) == 0) { verify(nvlist_lookup_string(propval, ZPROP_VALUE, &strval) == 0); if (strlen(strval) > entry->pl_width) entry->pl_width = strlen(strval); } if (received && zfs_prop_get_recvd(zhp, entry->pl_user_prop, buf, sizeof (buf), literal) == 0) if (strlen(buf) > entry->pl_recvd_width) entry->pl_recvd_width = strlen(buf); } } return (0); } void zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props) { nvpair_t *curr; nvpair_t *next; /* * Keep a reference to the props-table against which we prune the * properties. */ zhp->zfs_props_table = props; curr = nvlist_next_nvpair(zhp->zfs_props, NULL); while (curr) { zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr)); next = nvlist_next_nvpair(zhp->zfs_props, curr); /* * User properties will result in ZPROP_INVAL, and since we * only know how to prune standard ZFS properties, we always * leave these in the list. This can also happen if we * encounter an unknown DSL property (when running older * software, for example). 
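 *
 * A sketch of the caller's side (illustrative only): the table is
 * indexed by zfs_prop_t and nonzero entries mark properties to keep:
 *
 *	uint8_t keep[ZFS_NUM_PROPS] = { 0 };
 *	keep[ZFS_PROP_USED] = B_TRUE;
 *	keep[ZFS_PROP_AVAILABLE] = B_TRUE;
 *	zfs_prune_proplist(zhp, keep);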
*/ if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE) (void) nvlist_remove(zhp->zfs_props, nvpair_name(curr), nvpair_type(curr)); curr = next; } } static int zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path, zfs_smb_acl_op_t cmd, char *resource1, char *resource2) { zfs_cmd_t zc = {"\0"}; nvlist_t *nvlist = NULL; int error; (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value)); zc.zc_cookie = (uint64_t)cmd; if (cmd == ZFS_SMB_ACL_RENAME) { if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (0); } } switch (cmd) { case ZFS_SMB_ACL_ADD: case ZFS_SMB_ACL_REMOVE: (void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string)); break; case ZFS_SMB_ACL_RENAME: if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC, resource1) != 0) { (void) no_memory(hdl); return (-1); } if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET, resource2) != 0) { (void) no_memory(hdl); return (-1); } if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) { nvlist_free(nvlist); return (-1); } break; case ZFS_SMB_ACL_PURGE: break; default: return (-1); } error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc); nvlist_free(nvlist); return (error); } int zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset, char *path, char *resource) { return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD, resource, NULL)); } int zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset, char *path, char *resource) { return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE, resource, NULL)); } int zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path) { return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE, NULL, NULL)); } int zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path, char *oldname, char *newname) { return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME, oldname, newname)); } int zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type, zfs_userspace_cb_t func, void *arg) { zfs_cmd_t zc = {"\0"}; zfs_useracct_t buf[100]; libzfs_handle_t *hdl = zhp->zfs_hdl; int ret; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); zc.zc_objset_type = type; zc.zc_nvlist_dst = (uintptr_t)buf; for (;;) { zfs_useracct_t *zua = buf; zc.zc_nvlist_dst_size = sizeof (buf); if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) { char errbuf[1024]; if ((errno == ENOTSUP && (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED || type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA || type == ZFS_PROP_PROJECTOBJUSED || type == ZFS_PROP_PROJECTOBJQUOTA || type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA))) break; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get used/quota for %s"), zc.zc_name); return (zfs_standard_error_fmt(hdl, errno, errbuf)); } if (zc.zc_nvlist_dst_size == 0) break; while (zc.zc_nvlist_dst_size > 0) { if ((ret = func(arg, zua->zu_domain, zua->zu_rid, zua->zu_space)) != 0) return (ret); zua++; zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t); } } return (0); } struct holdarg { nvlist_t *nvl; const char *snapname; const char *tag; boolean_t recursive; int error; }; static int zfs_hold_one(zfs_handle_t *zhp, void *arg) { struct holdarg *ha = arg; char name[ZFS_MAX_DATASET_NAME_LEN]; int rv = 0; if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name, ha->snapname) >= sizeof (name)) return (EINVAL); if (lzc_exists(name)) fnvlist_add_string(ha->nvl, name, ha->tag); if (ha->recursive) rv = zfs_iter_filesystems(zhp, 
zfs_hold_one, ha); zfs_close(zhp); return (rv); } int zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag, boolean_t recursive, int cleanup_fd) { int ret; struct holdarg ha; ha.nvl = fnvlist_alloc(); ha.snapname = snapname; ha.tag = tag; ha.recursive = recursive; (void) zfs_hold_one(zfs_handle_dup(zhp), &ha); if (nvlist_empty(ha.nvl)) { char errbuf[1024]; fnvlist_free(ha.nvl); ret = ENOENT; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot hold snapshot '%s@%s'"), zhp->zfs_name, snapname); (void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf); return (ret); } ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl); fnvlist_free(ha.nvl); return (ret); } int zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds) { int ret; nvlist_t *errors; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; nvpair_t *elem; errors = NULL; ret = lzc_hold(holds, cleanup_fd, &errors); if (ret == 0) { /* There may be errors even in the success case. */ fnvlist_free(errors); return (0); } if (nvlist_empty(errors)) { /* no hold-specific errors */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot hold")); switch (ret) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); break; default: (void) zfs_standard_error(hdl, ret, errbuf); } } for (elem = nvlist_next_nvpair(errors, NULL); elem != NULL; elem = nvlist_next_nvpair(errors, elem)) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot hold snapshot '%s'"), nvpair_name(elem)); switch (fnvpair_value_int32(elem)) { case E2BIG: /* * Temporary tags wind up having the ds object id * prepended. So even if we passed the length check * above, it's still possible for the tag to wind * up being slightly too long. 
*/ (void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf); break; case EINVAL: (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case EEXIST: (void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf); break; default: (void) zfs_standard_error(hdl, fnvpair_value_int32(elem), errbuf); } } fnvlist_free(errors); return (ret); } static int zfs_release_one(zfs_handle_t *zhp, void *arg) { struct holdarg *ha = arg; char name[ZFS_MAX_DATASET_NAME_LEN]; int rv = 0; nvlist_t *existing_holds; if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name, ha->snapname) >= sizeof (name)) { ha->error = EINVAL; rv = EINVAL; } if (lzc_get_holds(name, &existing_holds) != 0) { ha->error = ENOENT; } else if (!nvlist_exists(existing_holds, ha->tag)) { ha->error = ESRCH; } else { nvlist_t *torelease = fnvlist_alloc(); fnvlist_add_boolean(torelease, ha->tag); fnvlist_add_nvlist(ha->nvl, name, torelease); fnvlist_free(torelease); } if (ha->recursive) rv = zfs_iter_filesystems(zhp, zfs_release_one, ha); zfs_close(zhp); return (rv); } int zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag, boolean_t recursive) { int ret; struct holdarg ha; nvlist_t *errors = NULL; nvpair_t *elem; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; ha.nvl = fnvlist_alloc(); ha.snapname = snapname; ha.tag = tag; ha.recursive = recursive; ha.error = 0; (void) zfs_release_one(zfs_handle_dup(zhp), &ha); if (nvlist_empty(ha.nvl)) { fnvlist_free(ha.nvl); ret = ha.error; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot release hold from snapshot '%s@%s'"), zhp->zfs_name, snapname); if (ret == ESRCH) { (void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf); } else { (void) zfs_standard_error(hdl, ret, errbuf); } return (ret); } ret = lzc_release(ha.nvl, &errors); fnvlist_free(ha.nvl); if (ret == 0) { /* There may be errors even in the success case. 
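 * As with lzc_hold() above, lzc_release() can attach per-snapshot
 * entries (name -> int32 errno) to the errors nvlist even when its
 * overall return value is 0, so the list is freed here and only
 * walked on failure.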
*/ fnvlist_free(errors); return (0); } if (nvlist_empty(errors)) { /* no hold-specific errors */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot release")); switch (errno) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; default: (void) zfs_standard_error_fmt(hdl, errno, errbuf); } } for (elem = nvlist_next_nvpair(errors, NULL); elem != NULL; elem = nvlist_next_nvpair(errors, elem)) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot release hold from snapshot '%s'"), nvpair_name(elem)); switch (fnvpair_value_int32(elem)) { case ESRCH: (void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf); break; case EINVAL: (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); break; default: (void) zfs_standard_error_fmt(hdl, fnvpair_value_int32(elem), errbuf); } } fnvlist_free(errors); return (ret); } int zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl) { zfs_cmd_t zc = {"\0"}; libzfs_handle_t *hdl = zhp->zfs_hdl; int nvsz = 2048; void *nvbuf; int err = 0; char errbuf[1024]; assert(zhp->zfs_type == ZFS_TYPE_VOLUME || zhp->zfs_type == ZFS_TYPE_FILESYSTEM); tryagain: nvbuf = malloc(nvsz); if (nvbuf == NULL) { err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno))); goto out; } zc.zc_nvlist_dst_size = nvsz; zc.zc_nvlist_dst = (uintptr_t)nvbuf; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (ioctl(hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"), zc.zc_name); switch (errno) { case ENOMEM: free(nvbuf); nvsz = zc.zc_nvlist_dst_size; goto tryagain; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } else { /* success */ int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0); if (rc) { (void) snprintf(errbuf, sizeof (errbuf), dgettext( TEXT_DOMAIN, "cannot get permissions on '%s'"), zc.zc_name); err = zfs_standard_error_fmt(hdl, rc, errbuf); } } free(nvbuf); out: return (err); } int zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl) { zfs_cmd_t zc = {"\0"}; libzfs_handle_t *hdl = zhp->zfs_hdl; char *nvbuf; char errbuf[1024]; size_t nvsz; int err; assert(zhp->zfs_type == ZFS_TYPE_VOLUME || zhp->zfs_type == ZFS_TYPE_FILESYSTEM); err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE); assert(err == 0); nvbuf = malloc(nvsz); err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0); assert(err == 0); zc.zc_nvlist_src_size = nvsz; zc.zc_nvlist_src = (uintptr_t)nvbuf; zc.zc_perm_action = un; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"), zc.zc_name); switch (errno) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } free(nvbuf); return (err); } int zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl) { int err; char errbuf[1024]; err = lzc_get_holds(zhp->zfs_name, 
nvl); if (err != 0) { libzfs_handle_t *hdl = zhp->zfs_hdl; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"), zhp->zfs_name); switch (err) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } return (err); } /* * Convert the zvol's volume size to an appropriate reservation. * Note: If this routine is updated, it is necessary to update the ZFS test * suite's shell version in reservation.kshlib. */ uint64_t zvol_volsize_to_reservation(uint64_t volsize, nvlist_t *props) { uint64_t numdb; uint64_t nblocks, volblocksize; int ncopies; char *strval; if (nvlist_lookup_string(props, zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0) ncopies = atoi(strval); else ncopies = 1; if (nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) volblocksize = ZVOL_DEFAULT_BLOCKSIZE; nblocks = volsize/volblocksize; /* start with metadnode L0-L6 */ numdb = 7; /* calculate number of indirects */ while (nblocks > 1) { nblocks += DNODES_PER_LEVEL - 1; nblocks /= DNODES_PER_LEVEL; numdb += nblocks; } numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1); volsize *= ncopies; /* * this is exactly DN_MAX_INDBLKSHIFT when metadata isn't * compressed, but in practice they compress down to about * 1100 bytes */ numdb *= 1ULL << DN_MAX_INDBLKSHIFT; volsize += numdb; return (volsize); } diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c index ae24454d7c79..1d8292101dd6 100644 --- a/lib/libzfs/libzfs_sendrecv.c +++ b/lib/libzfs/libzfs_sendrecv.c @@ -1,4647 +1,4675 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2015 by Delphix. All rights reserved. * Copyright (c) 2012, Joyent, Inc. All rights reserved. * Copyright (c) 2012 Pawel Jakub Dawidek . * All rights reserved * Copyright (c) 2013 Steven Hartland. All rights reserved. * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved. * Copyright 2016 Igor Kozhukhov - * Copyright (c) 2017, loli10K . All rights reserved. + * Copyright (c) 2018, loli10K . All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" #include "zfs_prop.h" #include "zfs_fletcher.h" #include "libzfs_impl.h" #include #include #include #include #include #include /* in libzfs_dataset.c */ extern void zfs_setprop_error(libzfs_handle_t *, zfs_prop_t, int, char *); static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *, recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **, int, uint64_t *, const char *, nvlist_t *); static int guid_to_name(libzfs_handle_t *, const char *, uint64_t, boolean_t, char *); static const zio_cksum_t zero_cksum = { { 0 } }; typedef struct dedup_arg { int inputfd; int outputfd; libzfs_handle_t *dedup_hdl; } dedup_arg_t; typedef struct progress_arg { zfs_handle_t *pa_zhp; int pa_fd; boolean_t pa_parsable; } progress_arg_t; typedef struct dataref { uint64_t ref_guid; uint64_t ref_object; uint64_t ref_offset; } dataref_t; typedef struct dedup_entry { struct dedup_entry *dde_next; zio_cksum_t dde_chksum; uint64_t dde_prop; dataref_t dde_ref; } dedup_entry_t; #define MAX_DDT_PHYSMEM_PERCENT 20 #define SMALLEST_POSSIBLE_MAX_DDT_MB 128 typedef struct dedup_table { dedup_entry_t **dedup_hash_array; umem_cache_t *ddecache; uint64_t max_ddt_size; /* max dedup table size in bytes */ uint64_t cur_ddt_size; /* current dedup table size in bytes */ uint64_t ddt_count; int numhashbits; boolean_t ddt_full; } dedup_table_t; static int high_order_bit(uint64_t n) { int count; for (count = 0; n != 0; count++) n >>= 1; return (count); } static size_t ssread(void *buf, size_t len, FILE *stream) { size_t outlen; if ((outlen = fread(buf, len, 1, stream)) == 0) return (0); return (outlen); } static void ddt_hash_append(libzfs_handle_t *hdl, dedup_table_t *ddt, dedup_entry_t **ddepp, zio_cksum_t *cs, uint64_t prop, dataref_t *dr) { dedup_entry_t *dde; if (ddt->cur_ddt_size >= ddt->max_ddt_size) { if (ddt->ddt_full == B_FALSE) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Dedup table full. Deduplication will continue " "with existing table entries")); ddt->ddt_full = B_TRUE; } return; } if ((dde = umem_cache_alloc(ddt->ddecache, UMEM_DEFAULT)) != NULL) { assert(*ddepp == NULL); dde->dde_next = NULL; dde->dde_chksum = *cs; dde->dde_prop = prop; dde->dde_ref = *dr; *ddepp = dde; ddt->cur_ddt_size += sizeof (dedup_entry_t); ddt->ddt_count++; } } /* * Using the specified dedup table, do a lookup for an entry with * the checksum cs. If found, return the block's reference info * in *dr. Otherwise, insert a new entry in the dedup table, using * the reference information specified by *dr. 
* * return value: true - entry was found * false - entry was not found */ static boolean_t ddt_update(libzfs_handle_t *hdl, dedup_table_t *ddt, zio_cksum_t *cs, uint64_t prop, dataref_t *dr) { uint32_t hashcode; dedup_entry_t **ddepp; hashcode = BF64_GET(cs->zc_word[0], 0, ddt->numhashbits); for (ddepp = &(ddt->dedup_hash_array[hashcode]); *ddepp != NULL; ddepp = &((*ddepp)->dde_next)) { if (ZIO_CHECKSUM_EQUAL(((*ddepp)->dde_chksum), *cs) && (*ddepp)->dde_prop == prop) { *dr = (*ddepp)->dde_ref; return (B_TRUE); } } ddt_hash_append(hdl, ddt, ddepp, cs, prop, dr); return (B_FALSE); } static int dump_record(dmu_replay_record_t *drr, void *payload, int payload_len, zio_cksum_t *zc, int outfd) { ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); fletcher_4_incremental_native(drr, offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc); if (drr->drr_type != DRR_BEGIN) { ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u. drr_checksum.drr_checksum)); drr->drr_u.drr_checksum.drr_checksum = *zc; } fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum, sizeof (zio_cksum_t), zc); if (write(outfd, drr, sizeof (*drr)) == -1) return (errno); if (payload_len != 0) { fletcher_4_incremental_native(payload, payload_len, zc); if (write(outfd, payload, payload_len) == -1) return (errno); } return (0); } /* * This function is started in a separate thread when the dedup option * has been requested. The main send thread determines the list of * snapshots to be included in the send stream and makes the ioctl calls * for each one. But instead of having the ioctl send the output to * the output fd specified by the caller of zfs_send(), the * ioctl is told to direct the output to a pipe, which is read by the * alternate thread running THIS function. This function does the * dedup'ing by: * 1. building a dedup table (the DDT) * 2. doing checksums on each data block and inserting a record in the DDT * 3. looking for matching checksums, and * 4. sending a DRR_WRITE_BYREF record instead of a write record whenever * a duplicate block is found. * The output of this function then goes to the output fd requested * by the caller of zfs_send(). */ static void * cksummer(void *arg) { dedup_arg_t *dda = arg; char *buf = zfs_alloc(dda->dedup_hdl, SPA_MAXBLOCKSIZE); dmu_replay_record_t thedrr = { 0 }; dmu_replay_record_t *drr = &thedrr; FILE *ofp; int outfd; dedup_table_t ddt; zio_cksum_t stream_cksum; uint64_t numbuckets; #ifdef _ILP32 ddt.max_ddt_size = SMALLEST_POSSIBLE_MAX_DDT_MB << 20; #else uint64_t physmem = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE); ddt.max_ddt_size = MAX((physmem * MAX_DDT_PHYSMEM_PERCENT) / 100, SMALLEST_POSSIBLE_MAX_DDT_MB << 20); #endif numbuckets = ddt.max_ddt_size / (sizeof (dedup_entry_t)); /* * numbuckets must be a power of 2. Increase number to * a power of 2 if necessary. */ if (!ISP2(numbuckets)) numbuckets = 1ULL << high_order_bit(numbuckets); ddt.dedup_hash_array = calloc(numbuckets, sizeof (dedup_entry_t *)); ddt.ddecache = umem_cache_create("dde", sizeof (dedup_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0); ddt.cur_ddt_size = numbuckets * sizeof (dedup_entry_t *); ddt.numhashbits = high_order_bit(numbuckets) - 1; ddt.ddt_full = B_FALSE; outfd = dda->outputfd; ofp = fdopen(dda->inputfd, "r"); while (ssread(drr, sizeof (*drr), ofp) != 0) { /* * The kernel filled in the checksum; since we are going to * write the same record, we need to regenerate the checksum.
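 * The checksum is cumulative over the whole stream: dump_record()
 * above folds each record into the running fletcher-4 state and, for
 * every record except DRR_BEGIN, stores the running value in the
 * record's own checksum field before writing it out.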
*/ if (drr->drr_type != DRR_BEGIN) { bzero(&drr->drr_u.drr_checksum.drr_checksum, sizeof (drr->drr_u.drr_checksum.drr_checksum)); } switch (drr->drr_type) { case DRR_BEGIN: { struct drr_begin *drrb = &drr->drr_u.drr_begin; int fflags; int sz = 0; ZIO_SET_CHECKSUM(&stream_cksum, 0, 0, 0, 0); ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); /* set the DEDUP feature flag for this stream */ fflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); fflags |= (DMU_BACKUP_FEATURE_DEDUP | DMU_BACKUP_FEATURE_DEDUPPROPS); DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, fflags); if (drr->drr_payloadlen != 0) { sz = drr->drr_payloadlen; if (sz > SPA_MAXBLOCKSIZE) { buf = zfs_realloc(dda->dedup_hdl, buf, SPA_MAXBLOCKSIZE, sz); } (void) ssread(buf, sz, ofp); if (ferror(stdin)) perror("fread"); } if (dump_record(drr, buf, sz, &stream_cksum, outfd) != 0) goto out; break; } case DRR_END: { struct drr_end *drre = &drr->drr_u.drr_end; /* use the recalculated checksum */ drre->drr_checksum = stream_cksum; if (dump_record(drr, NULL, 0, &stream_cksum, outfd) != 0) goto out; break; } case DRR_OBJECT: { struct drr_object *drro = &drr->drr_u.drr_object; if (drro->drr_bonuslen > 0) { (void) ssread(buf, DRR_OBJECT_PAYLOAD_SIZE(drro), ofp); } if (dump_record(drr, buf, DRR_OBJECT_PAYLOAD_SIZE(drro), &stream_cksum, outfd) != 0) goto out; break; } case DRR_SPILL: { struct drr_spill *drrs = &drr->drr_u.drr_spill; (void) ssread(buf, DRR_SPILL_PAYLOAD_SIZE(drrs), ofp); if (dump_record(drr, buf, DRR_SPILL_PAYLOAD_SIZE(drrs), &stream_cksum, outfd) != 0) goto out; break; } case DRR_FREEOBJECTS: { if (dump_record(drr, NULL, 0, &stream_cksum, outfd) != 0) goto out; break; } case DRR_WRITE: { struct drr_write *drrw = &drr->drr_u.drr_write; dataref_t dataref; uint64_t payload_size; payload_size = DRR_WRITE_PAYLOAD_SIZE(drrw); (void) ssread(buf, payload_size, ofp); /* * Use the existing checksum if it's dedup-capable, * else calculate a SHA256 checksum for it. 
*/ if (ZIO_CHECKSUM_EQUAL(drrw->drr_key.ddk_cksum, zero_cksum) || !DRR_IS_DEDUP_CAPABLE(drrw->drr_flags)) { SHA2_CTX ctx; zio_cksum_t tmpsha256; SHA2Init(SHA256, &ctx); SHA2Update(&ctx, buf, payload_size); SHA2Final(&tmpsha256, &ctx); drrw->drr_key.ddk_cksum.zc_word[0] = BE_64(tmpsha256.zc_word[0]); drrw->drr_key.ddk_cksum.zc_word[1] = BE_64(tmpsha256.zc_word[1]); drrw->drr_key.ddk_cksum.zc_word[2] = BE_64(tmpsha256.zc_word[2]); drrw->drr_key.ddk_cksum.zc_word[3] = BE_64(tmpsha256.zc_word[3]); drrw->drr_checksumtype = ZIO_CHECKSUM_SHA256; drrw->drr_flags |= DRR_CHECKSUM_DEDUP; } dataref.ref_guid = drrw->drr_toguid; dataref.ref_object = drrw->drr_object; dataref.ref_offset = drrw->drr_offset; if (ddt_update(dda->dedup_hdl, &ddt, &drrw->drr_key.ddk_cksum, drrw->drr_key.ddk_prop, &dataref)) { dmu_replay_record_t wbr_drr = {0}; struct drr_write_byref *wbr_drrr = &wbr_drr.drr_u.drr_write_byref; /* block already present in stream */ wbr_drr.drr_type = DRR_WRITE_BYREF; wbr_drrr->drr_object = drrw->drr_object; wbr_drrr->drr_offset = drrw->drr_offset; wbr_drrr->drr_length = drrw->drr_logical_size; wbr_drrr->drr_toguid = drrw->drr_toguid; wbr_drrr->drr_refguid = dataref.ref_guid; wbr_drrr->drr_refobject = dataref.ref_object; wbr_drrr->drr_refoffset = dataref.ref_offset; wbr_drrr->drr_checksumtype = drrw->drr_checksumtype; wbr_drrr->drr_flags = drrw->drr_flags; wbr_drrr->drr_key.ddk_cksum = drrw->drr_key.ddk_cksum; wbr_drrr->drr_key.ddk_prop = drrw->drr_key.ddk_prop; if (dump_record(&wbr_drr, NULL, 0, &stream_cksum, outfd) != 0) goto out; } else { /* block not previously seen */ if (dump_record(drr, buf, payload_size, &stream_cksum, outfd) != 0) goto out; } break; } case DRR_WRITE_EMBEDDED: { struct drr_write_embedded *drrwe = &drr->drr_u.drr_write_embedded; (void) ssread(buf, P2ROUNDUP((uint64_t)drrwe->drr_psize, 8), ofp); if (dump_record(drr, buf, P2ROUNDUP((uint64_t)drrwe->drr_psize, 8), &stream_cksum, outfd) != 0) goto out; break; } case DRR_FREE: { if (dump_record(drr, NULL, 0, &stream_cksum, outfd) != 0) goto out; break; } case DRR_OBJECT_RANGE: { if (dump_record(drr, NULL, 0, &stream_cksum, outfd) != 0) goto out; break; } default: (void) fprintf(stderr, "INVALID record type 0x%x\n", drr->drr_type); /* should never happen, so assert */ assert(B_FALSE); } } out: umem_cache_destroy(ddt.ddecache); free(ddt.dedup_hash_array); free(buf); (void) fclose(ofp); return (NULL); } /* * Routines for dealing with the AVL tree of fs-nvlists */ typedef struct fsavl_node { avl_node_t fn_node; nvlist_t *fn_nvfs; char *fn_snapname; uint64_t fn_guid; } fsavl_node_t; static int fsavl_compare(const void *arg1, const void *arg2) { const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1; const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2; return (AVL_CMP(fn1->fn_guid, fn2->fn_guid)); } /* * Given the GUID of a snapshot, find its containing filesystem and * (optionally) name. 
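 *
 * A minimal lookup sketch (illustrative; "fss" is the nvlist of
 * fs-nvlists built elsewhere in this file):
 *
 *	char *snapname;
 *	avl_tree_t *avl = fsavl_create(fss);
 *	nvlist_t *nvfs = fsavl_find(avl, snapguid, &snapname);
 *	...
 *	fsavl_destroy(avl);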
*/ static nvlist_t * fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname) { fsavl_node_t fn_find; fsavl_node_t *fn; fn_find.fn_guid = snapguid; fn = avl_find(avl, &fn_find, NULL); if (fn) { if (snapname) *snapname = fn->fn_snapname; return (fn->fn_nvfs); } return (NULL); } static void fsavl_destroy(avl_tree_t *avl) { fsavl_node_t *fn; void *cookie; if (avl == NULL) return; cookie = NULL; while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL) free(fn); avl_destroy(avl); free(avl); } /* * Given an nvlist, produce an avl tree of snapshots, ordered by guid */ static avl_tree_t * fsavl_create(nvlist_t *fss) { avl_tree_t *fsavl; nvpair_t *fselem = NULL; if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL) return (NULL); avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t), offsetof(fsavl_node_t, fn_node)); while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) { nvlist_t *nvfs, *snaps; nvpair_t *snapelem = NULL; VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); while ((snapelem = nvlist_next_nvpair(snaps, snapelem)) != NULL) { fsavl_node_t *fn; uint64_t guid; VERIFY(0 == nvpair_value_uint64(snapelem, &guid)); if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) { fsavl_destroy(fsavl); return (NULL); } fn->fn_nvfs = nvfs; fn->fn_snapname = nvpair_name(snapelem); fn->fn_guid = guid; /* * Note: if there are multiple snaps with the * same GUID, we ignore all but one. */ if (avl_find(fsavl, fn, NULL) == NULL) avl_add(fsavl, fn); else free(fn); } } return (fsavl); } /* * Routines for dealing with the giant nvlist of fs-nvlists, etc. */ typedef struct send_data { /* * assigned inside every recursive call, * restored from *_save on return: * * guid of fromsnap snapshot in parent dataset * txg of fromsnap snapshot in current dataset * txg of tosnap snapshot in current dataset */ uint64_t parent_fromsnap_guid; uint64_t fromsnap_txg; uint64_t tosnap_txg; /* the nvlists get accumulated during depth-first traversal */ nvlist_t *parent_snaps; nvlist_t *fss; nvlist_t *snapprops; /* send-receive configuration, does not change during traversal */ const char *fsname; const char *fromsnap; const char *tosnap; boolean_t raw; boolean_t backup; boolean_t recursive; boolean_t verbose; boolean_t seenfrom; boolean_t seento; /* * The header nvlist is of the following format: * { * "tosnap" -> string * "fromsnap" -> string (if incremental) * "fss" -> { * id -> { * * "name" -> string (full name; for debugging) * "parentfromsnap" -> number (guid of fromsnap in parent) * * "props" -> { name -> value (only if set here) } * "snaps" -> { name (lastname) -> number (guid) } * "snapprops" -> { name (lastname) -> { name -> value } } * * "origin" -> number (guid) (if clone) * "is_encroot" -> boolean * "sent" -> boolean (not on-disk) * } * } * } * */ } send_data_t; static void send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv); static int send_iterate_snap(zfs_handle_t *zhp, void *arg) { send_data_t *sd = arg; uint64_t guid = zhp->zfs_dmustats.dds_guid; uint64_t txg = zhp->zfs_dmustats.dds_creation_txg; char *snapname; nvlist_t *nv; boolean_t isfromsnap, istosnap, istosnapwithnofrom; snapname = strrchr(zhp->zfs_name, '@')+1; isfromsnap = (sd->fromsnap != NULL && strcmp(sd->fromsnap, snapname) == 0); istosnap = (sd->tosnap != NULL && (strcmp(sd->tosnap, snapname) == 0)); istosnapwithnofrom = (istosnap && sd->fromsnap == NULL); if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) { if (sd->verbose) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, 
"skipping snapshot %s because it was created " "after the destination snapshot (%s)\n"), zhp->zfs_name, sd->tosnap); } zfs_close(zhp); return (0); } VERIFY(0 == nvlist_add_uint64(sd->parent_snaps, snapname, guid)); /* * NB: if there is no fromsnap here (it's a newly created fs in * an incremental replication), we will substitute the tosnap. */ if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap)) { sd->parent_fromsnap_guid = guid; } if (!sd->recursive) { if (!sd->seenfrom && isfromsnap) { sd->seenfrom = B_TRUE; zfs_close(zhp); return (0); } if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) { zfs_close(zhp); return (0); } if (istosnap) sd->seento = B_TRUE; } VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); send_iterate_prop(zhp, sd->backup, nv); VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv)); nvlist_free(nv); zfs_close(zhp); return (0); } static void send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv) { nvlist_t *props = NULL; nvpair_t *elem = NULL; if (received_only) props = zfs_get_recvd_props(zhp); else props = zhp->zfs_props; while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { char *propname = nvpair_name(elem); zfs_prop_t prop = zfs_name_to_prop(propname); nvlist_t *propnv; if (!zfs_prop_user(propname)) { /* * Realistically, this should never happen. However, * we want the ability to add DSL properties without * needing to make incompatible version changes. We * need to ignore unknown properties to allow older * software to still send datasets containing these * properties, with the unknown properties elided. */ if (prop == ZPROP_INVAL) continue; if (zfs_prop_readonly(prop)) continue; } verify(nvpair_value_nvlist(elem, &propnv) == 0); if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION || prop == ZFS_PROP_REFQUOTA || prop == ZFS_PROP_REFRESERVATION) { char *source; uint64_t value; verify(nvlist_lookup_uint64(propnv, ZPROP_VALUE, &value) == 0); if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) continue; /* * May have no source before SPA_VERSION_RECVD_PROPS, * but is still modifiable. */ if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) == 0) { if ((strcmp(source, zhp->zfs_name) != 0) && (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)) continue; } } else { char *source; if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) != 0) continue; if ((strcmp(source, zhp->zfs_name) != 0) && (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)) continue; } if (zfs_prop_user(propname) || zfs_prop_get_type(prop) == PROP_TYPE_STRING) { char *value; verify(nvlist_lookup_string(propnv, ZPROP_VALUE, &value) == 0); VERIFY(0 == nvlist_add_string(nv, propname, value)); } else { uint64_t value; verify(nvlist_lookup_uint64(propnv, ZPROP_VALUE, &value) == 0); VERIFY(0 == nvlist_add_uint64(nv, propname, value)); } } } /* * returns snapshot creation txg * and returns 0 if the snapshot does not exist */ static uint64_t get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap) { char name[ZFS_MAX_DATASET_NAME_LEN]; uint64_t txg = 0; if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0') return (txg); (void) snprintf(name, sizeof (name), "%s@%s", fs, snap); if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) { zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT); if (zhp != NULL) { txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG); zfs_close(zhp); } } return (txg); } /* * recursively generate nvlists describing datasets. See comment * for the data structure send_data_t above for description of contents * of the nvlist. 
*/ static int send_iterate_fs(zfs_handle_t *zhp, void *arg) { send_data_t *sd = arg; nvlist_t *nvfs = NULL, *nv = NULL; int rv = 0; uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid; uint64_t fromsnap_txg_save = sd->fromsnap_txg; uint64_t tosnap_txg_save = sd->tosnap_txg; uint64_t txg = zhp->zfs_dmustats.dds_creation_txg; uint64_t guid = zhp->zfs_dmustats.dds_guid; uint64_t fromsnap_txg, tosnap_txg; char guidstring[64]; fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap); if (fromsnap_txg != 0) sd->fromsnap_txg = fromsnap_txg; tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap); if (tosnap_txg != 0) sd->tosnap_txg = tosnap_txg; /* * on the send side, if the current dataset does not have tosnap, * perform two additional checks: * * - skip sending the current dataset if it was created later than * the parent tosnap * - return error if the current dataset was created earlier than * the parent tosnap */ if (sd->tosnap != NULL && tosnap_txg == 0) { if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) { if (sd->verbose) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "skipping dataset %s: snapshot %s does " "not exist\n"), zhp->zfs_name, sd->tosnap); } } else { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "cannot send %s@%s%s: snapshot %s@%s does not " "exist\n"), sd->fsname, sd->tosnap, sd->recursive ? dgettext(TEXT_DOMAIN, " recursively") : "", zhp->zfs_name, sd->tosnap); rv = -1; } goto out; } VERIFY(0 == nvlist_alloc(&nvfs, NV_UNIQUE_NAME, 0)); VERIFY(0 == nvlist_add_string(nvfs, "name", zhp->zfs_name)); VERIFY(0 == nvlist_add_uint64(nvfs, "parentfromsnap", sd->parent_fromsnap_guid)); if (zhp->zfs_dmustats.dds_origin[0]) { zfs_handle_t *origin = zfs_open(zhp->zfs_hdl, zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT); if (origin == NULL) { rv = -1; goto out; } VERIFY(0 == nvlist_add_uint64(nvfs, "origin", origin->zfs_dmustats.dds_guid)); zfs_close(origin); } /* iterate over props */ VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); send_iterate_prop(zhp, sd->backup, nv); if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) { boolean_t encroot; /* determine if this dataset is an encryption root */ if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) { rv = -1; goto out; } if (encroot) VERIFY(0 == nvlist_add_boolean(nvfs, "is_encroot")); /* * Encrypted datasets can only be sent with properties if * the raw flag is specified because the receive side doesn't * currently have a mechanism for recursively asking the user * for new encryption parameters. 
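 * In command-line terms this is why replicating an encrypted dataset
 * together with its properties requires the raw flag, e.g.
 * "zfs send -Rw tank/enc@snap" rather than "zfs send -R tank/enc@snap".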
*/ if (!sd->raw) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "cannot send %s@%s: encrypted dataset %s may not " "be sent with properties without the raw flag\n"), sd->fsname, sd->tosnap, zhp->zfs_name); rv = -1; goto out; } } VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv)); /* iterate over snaps, and set sd->parent_fromsnap_guid */ sd->parent_fromsnap_guid = 0; VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0)); VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0)); (void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd); VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps)); VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops)); nvlist_free(sd->parent_snaps); nvlist_free(sd->snapprops); /* add this fs to nvlist */ (void) snprintf(guidstring, sizeof (guidstring), "0x%llx", (longlong_t)guid); VERIFY(0 == nvlist_add_nvlist(sd->fss, guidstring, nvfs)); /* iterate over children */ if (sd->recursive) rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd); out: sd->parent_fromsnap_guid = parent_fromsnap_guid_save; sd->fromsnap_txg = fromsnap_txg_save; sd->tosnap_txg = tosnap_txg_save; nvlist_free(nv); nvlist_free(nvfs); zfs_close(zhp); return (rv); } static int gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap, const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t verbose, boolean_t backup, nvlist_t **nvlp, avl_tree_t **avlp) { zfs_handle_t *zhp; send_data_t sd = { 0 }; int error; zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return (EZFS_BADTYPE); VERIFY(0 == nvlist_alloc(&sd.fss, NV_UNIQUE_NAME, 0)); sd.fsname = fsname; sd.fromsnap = fromsnap; sd.tosnap = tosnap; sd.recursive = recursive; sd.raw = raw; sd.verbose = verbose; sd.backup = backup; if ((error = send_iterate_fs(zhp, &sd)) != 0) { nvlist_free(sd.fss); if (avlp != NULL) *avlp = NULL; *nvlp = NULL; return (error); } if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) { nvlist_free(sd.fss); *nvlp = NULL; return (EZFS_NOMEM); } *nvlp = sd.fss; return (0); } /* * Routines specific to "zfs send" */ typedef struct send_dump_data { /* these are all just the short snapname (the part after the @) */ const char *fromsnap; const char *tosnap; char prevsnap[ZFS_MAX_DATASET_NAME_LEN]; uint64_t prevsnap_obj; boolean_t seenfrom, seento, replicate, doall, fromorigin; boolean_t verbose, dryrun, parsable, progress, embed_data, std_out; boolean_t large_block, compress, raw; int outfd; boolean_t err; nvlist_t *fss; nvlist_t *snapholds; avl_tree_t *fsavl; snapfilter_cb_t *filter_cb; void *filter_cb_arg; nvlist_t *debugnv; char holdtag[ZFS_MAX_DATASET_NAME_LEN]; int cleanup_fd; uint64_t size; } send_dump_data_t; static int zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from, enum lzc_send_flags flags, uint64_t *spacep) { libzfs_handle_t *hdl = zhp->zfs_hdl; int error; assert(snapname != NULL); error = lzc_send_space(snapname, from, flags, spacep); if (error != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot estimate space for '%s'"), snapname); switch (error) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not an earlier snapshot from the same fs")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); case ENOENT: if (zfs_dataset_exists(hdl, snapname, ZFS_TYPE_SNAPSHOT)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source (%s) does not exist"), snapname); } return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EDQUOT: case EFBIG: case 
EIO: case ENOLINK: case ENOSPC: case ENOSTR: case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: case EINVAL: zfs_error_aux(hdl, strerror(error)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, error, errbuf)); } } return (0); } /* * Dumps a backup of the given snapshot (incremental from fromsnap if it's not * NULL) to the file descriptor specified by outfd. */ static int dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj, boolean_t fromorigin, int outfd, enum lzc_send_flags flags, nvlist_t *debugnv) { zfs_cmd_t zc = {"\0"}; libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *thisdbg; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); assert(fromsnap_obj == 0 || !fromorigin); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); zc.zc_cookie = outfd; zc.zc_obj = fromorigin; zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zc.zc_fromobj = fromsnap_obj; zc.zc_flags = flags; VERIFY(0 == nvlist_alloc(&thisdbg, NV_UNIQUE_NAME, 0)); if (fromsnap && fromsnap[0] != '\0') { VERIFY(0 == nvlist_add_string(thisdbg, "fromsnap", fromsnap)); } if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot send '%s'"), zhp->zfs_name); VERIFY(0 == nvlist_add_uint64(thisdbg, "error", errno)); if (debugnv) { VERIFY(0 == nvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg)); } nvlist_free(thisdbg); switch (errno) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not an earlier snapshot from the same fs")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); case EACCES: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "source key must be loaded")); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); case ENOENT: if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_SNAPSHOT)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source (@%s) does not exist"), zc.zc_value); } return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EDQUOT: case EFBIG: case EIO: case ENOLINK: case ENOSPC: case ENOSTR: case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: zfs_error_aux(hdl, strerror(errno)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, errno, errbuf)); } } if (debugnv) VERIFY(0 == nvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg)); nvlist_free(thisdbg); return (0); } static void gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd) { assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); /* * zfs_send() only sets snapholds for sends that need them, * e.g. replication and doall. */ if (sdd->snapholds == NULL) return; fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag); } static void * send_progress_thread(void *arg) { progress_arg_t *pa = arg; zfs_cmd_t zc = {"\0"}; zfs_handle_t *zhp = pa->pa_zhp; libzfs_handle_t *hdl = zhp->zfs_hdl; unsigned long long bytes; char buf[16]; time_t t; struct tm *tm; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (!pa->pa_parsable) (void) fprintf(stderr, "TIME SENT SNAPSHOT\n"); /* * Print the progress from ZFS_IOC_SEND_PROGRESS every second. 
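 */

/*
 * Example (hypothetical consumer, not from this change): in parsable
 * mode each progress line below has the form
 * "HH:MM:SS<TAB>bytes<TAB>snapshot", so a caller reading stderr could
 * recover the byte count with something like
 *
 *	unsigned long long bytes;
 *	char snap[ZFS_MAX_DATASET_NAME_LEN];
 *	(void) sscanf(line, "%*u:%*u:%*u\t%llu\t%255s", &bytes, snap);
 *
 * where "line" is one captured line of output.
 */

/*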
*/ for (;;) { (void) sleep(1); zc.zc_cookie = pa->pa_fd; if (zfs_ioctl(hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0) return ((void *)-1); (void) time(&t); tm = localtime(&t); bytes = zc.zc_cookie; if (pa->pa_parsable) { (void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n", tm->tm_hour, tm->tm_min, tm->tm_sec, bytes, zhp->zfs_name); } else { zfs_nicebytes(bytes, buf, sizeof (buf)); (void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n", tm->tm_hour, tm->tm_min, tm->tm_sec, buf, zhp->zfs_name); } } } static void send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap, uint64_t size, boolean_t parsable) { if (parsable) { if (fromsnap != NULL) { (void) fprintf(fout, "incremental\t%s\t%s", fromsnap, tosnap); } else { (void) fprintf(fout, "full\t%s", tosnap); } } else { if (fromsnap != NULL) { if (strchr(fromsnap, '@') == NULL && strchr(fromsnap, '#') == NULL) { (void) fprintf(fout, dgettext(TEXT_DOMAIN, "send from @%s to %s"), fromsnap, tosnap); } else { (void) fprintf(fout, dgettext(TEXT_DOMAIN, "send from %s to %s"), fromsnap, tosnap); } } else { (void) fprintf(fout, dgettext(TEXT_DOMAIN, "full send of %s"), tosnap); } } if (parsable) { (void) fprintf(fout, "\t%llu", (longlong_t)size); } else if (size != 0) { char buf[16]; zfs_nicebytes(size, buf, sizeof (buf)); (void) fprintf(fout, dgettext(TEXT_DOMAIN, " estimated size is %s"), buf); } (void) fprintf(fout, "\n"); } static int dump_snapshot(zfs_handle_t *zhp, void *arg) { send_dump_data_t *sdd = arg; progress_arg_t pa = { 0 }; pthread_t tid; char *thissnap; enum lzc_send_flags flags = 0; int err; boolean_t isfromsnap, istosnap, fromorigin; boolean_t exclude = B_FALSE; FILE *fout = sdd->std_out ? stdout : stderr; err = 0; thissnap = strchr(zhp->zfs_name, '@') + 1; isfromsnap = (sdd->fromsnap != NULL && strcmp(sdd->fromsnap, thissnap) == 0); if (!sdd->seenfrom && isfromsnap) { gather_holds(zhp, sdd); sdd->seenfrom = B_TRUE; (void) strlcpy(sdd->prevsnap, thissnap, sizeof (sdd->prevsnap)); sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zfs_close(zhp); return (0); } if (sdd->seento || !sdd->seenfrom) { zfs_close(zhp); return (0); } istosnap = (strcmp(sdd->tosnap, thissnap) == 0); if (istosnap) sdd->seento = B_TRUE; if (sdd->large_block) flags |= LZC_SEND_FLAG_LARGE_BLOCK; if (sdd->embed_data) flags |= LZC_SEND_FLAG_EMBED_DATA; if (sdd->compress) flags |= LZC_SEND_FLAG_COMPRESS; if (sdd->raw) flags |= LZC_SEND_FLAG_RAW; if (!sdd->doall && !isfromsnap && !istosnap) { if (sdd->replicate) { char *snapname; nvlist_t *snapprops; /* * Filter out all intermediate snapshots except origin * snapshots needed to replicate clones. */ nvlist_t *nvfs = fsavl_find(sdd->fsavl, zhp->zfs_dmustats.dds_guid, &snapname); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snapprops", &snapprops)); VERIFY(0 == nvlist_lookup_nvlist(snapprops, thissnap, &snapprops)); exclude = !nvlist_exists(snapprops, "is_clone_origin"); } else { exclude = B_TRUE; } } /* * If a filter function exists, call it to determine whether * this snapshot will be sent. */ if (exclude || (sdd->filter_cb != NULL && sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) { /* * This snapshot is filtered out. Don't send it, and don't * set prevsnap_obj, so it will be as if this snapshot didn't * exist, and the next accepted snapshot will be sent as * an incremental from the last accepted one, or as the * first (and full) snapshot in the case of a replication, * non-incremental send. 
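 */

/*
 * Example (hypothetical filter, not from this change): a
 * snapfilter_cb_t passed to zfs_send() that drops snapshots with a
 * "tmp-" prefix could look like
 *
 *	static boolean_t
 *	skip_tmp_snaps(zfs_handle_t *zhp, void *arg)
 *	{
 *		const char *snap = strchr(zhp->zfs_name, '@') + 1;
 *		return (strncmp(snap, "tmp-", 4) == 0 ? B_FALSE : B_TRUE);
 *	}
 *
 * Returning B_FALSE excludes the snapshot, exactly as handled here.
 */

/*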
*/ zfs_close(zhp); return (0); } gather_holds(zhp, sdd); fromorigin = sdd->prevsnap[0] == '\0' && (sdd->fromorigin || sdd->replicate); if (sdd->verbose) { uint64_t size = 0; char fromds[ZFS_MAX_DATASET_NAME_LEN]; if (sdd->prevsnap[0] != '\0') { (void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds)); *(strchr(fromds, '@') + 1) = '\0'; (void) strlcat(fromds, sdd->prevsnap, sizeof (fromds)); } if (zfs_send_space(zhp, zhp->zfs_name, sdd->prevsnap[0] ? fromds : NULL, flags, &size) != 0) { size = 0; /* cannot estimate send space */ } else { send_print_verbose(fout, zhp->zfs_name, sdd->prevsnap[0] ? sdd->prevsnap : NULL, size, sdd->parsable); } sdd->size += size; } if (!sdd->dryrun) { /* * If progress reporting is requested, spawn a new thread to * poll ZFS_IOC_SEND_PROGRESS at a regular interval. */ if (sdd->progress) { pa.pa_zhp = zhp; pa.pa_fd = sdd->outfd; pa.pa_parsable = sdd->parsable; if ((err = pthread_create(&tid, NULL, send_progress_thread, &pa)) != 0) { zfs_close(zhp); return (err); } } err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj, fromorigin, sdd->outfd, flags, sdd->debugnv); if (sdd->progress) { (void) pthread_cancel(tid); (void) pthread_join(tid, NULL); } } (void) strcpy(sdd->prevsnap, thissnap); sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zfs_close(zhp); return (err); } static int dump_filesystem(zfs_handle_t *zhp, void *arg) { int rv = 0; send_dump_data_t *sdd = arg; boolean_t missingfrom = B_FALSE; zfs_cmd_t zc = {"\0"}; (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", zhp->zfs_name, sdd->tosnap); if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s: does not exist\n"), zhp->zfs_name, sdd->tosnap); sdd->err = B_TRUE; return (0); } if (sdd->replicate && sdd->fromsnap) { /* * If this fs does not have fromsnap, and we're doing * recursive, we need to send a full stream from the * beginning (or an incremental from the origin if this * is a clone). If we're doing non-recursive, then let * them get the error. */ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", zhp->zfs_name, sdd->fromsnap); if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { missingfrom = B_TRUE; } } sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0; sdd->prevsnap_obj = 0; if (sdd->fromsnap == NULL || missingfrom) sdd->seenfrom = B_TRUE; rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg); if (!sdd->seenfrom) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s:\n" "incremental source (%s@%s) does not exist\n"), zhp->zfs_name, sdd->tosnap, zhp->zfs_name, sdd->fromsnap); sdd->err = B_TRUE; } else if (!sdd->seento) { if (sdd->fromsnap) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s:\n" "incremental source (%s@%s) " "is not earlier than it\n"), zhp->zfs_name, sdd->tosnap, zhp->zfs_name, sdd->fromsnap); } else { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: " "could not send %s@%s: does not exist\n"), zhp->zfs_name, sdd->tosnap); } sdd->err = B_TRUE; } return (rv); } static int dump_filesystems(zfs_handle_t *rzhp, void *arg) { send_dump_data_t *sdd = arg; nvpair_t *fspair; boolean_t needagain, progress; if (!sdd->replicate) return (dump_filesystem(rzhp, sdd)); /* Mark the clone origin snapshots. 
*/ for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *nvfs; uint64_t origin_guid = 0; VERIFY(0 == nvpair_value_nvlist(fspair, &nvfs)); (void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid); if (origin_guid != 0) { char *snapname; nvlist_t *origin_nv = fsavl_find(sdd->fsavl, origin_guid, &snapname); if (origin_nv != NULL) { nvlist_t *snapprops; VERIFY(0 == nvlist_lookup_nvlist(origin_nv, "snapprops", &snapprops)); VERIFY(0 == nvlist_lookup_nvlist(snapprops, snapname, &snapprops)); VERIFY(0 == nvlist_add_boolean( snapprops, "is_clone_origin")); } } } again: needagain = progress = B_FALSE; for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *fslist, *parent_nv; char *fsname; zfs_handle_t *zhp; int err; uint64_t origin_guid = 0; uint64_t parent_guid = 0; VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0); if (nvlist_lookup_boolean(fslist, "sent") == 0) continue; VERIFY(nvlist_lookup_string(fslist, "name", &fsname) == 0); (void) nvlist_lookup_uint64(fslist, "origin", &origin_guid); (void) nvlist_lookup_uint64(fslist, "parentfromsnap", &parent_guid); if (parent_guid != 0) { parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL); if (!nvlist_exists(parent_nv, "sent")) { /* parent has not been sent; skip this one */ needagain = B_TRUE; continue; } } if (origin_guid != 0) { nvlist_t *origin_nv = fsavl_find(sdd->fsavl, origin_guid, NULL); if (origin_nv != NULL && !nvlist_exists(origin_nv, "sent")) { /* * origin has not been sent yet; * skip this clone. */ needagain = B_TRUE; continue; } } zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET); if (zhp == NULL) return (-1); err = dump_filesystem(zhp, sdd); VERIFY(nvlist_add_boolean(fslist, "sent") == 0); progress = B_TRUE; zfs_close(zhp); if (err) return (err); } if (needagain) { assert(progress); goto again; } /* clean out the sent flags in case we reuse this fss */ for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *fslist; VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0); (void) nvlist_remove_all(fslist, "sent"); } return (0); } nvlist_t * zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token) { unsigned int version; int nread, i; unsigned long long checksum, packed_len; /* * Decode token header, which is: * <token version>-<checksum of payload>-<uncompressed payload length in hex> * Note that the only supported token version is 1. */
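
/*
 * Example (values invented for illustration): a token such as
 * "1-a5e9c0d12-2f8-789c..." parses below as version 1, fletcher-4
 * checksum 0xa5e9c0d12 and packed length 0x2f8, followed by the
 * hex-encoded, zlib-compressed nvlist payload.
 */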
*/ nread = sscanf(token, "%u-%llx-%llx-", &version, &checksum, &packed_len); if (nread != 3) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt (invalid format)")); return (NULL); } if (version != ZFS_SEND_RESUME_TOKEN_VERSION) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt (invalid version %u)"), version); return (NULL); } /* convert hexadecimal representation to binary */ token = strrchr(token, '-') + 1; int len = strlen(token) / 2; unsigned char *compressed = zfs_alloc(hdl, len); for (i = 0; i < len; i++) { nread = sscanf(token + i * 2, "%2hhx", compressed + i); if (nread != 1) { free(compressed); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt " "(payload is not hex-encoded)")); return (NULL); } } /* verify checksum */ zio_cksum_t cksum; fletcher_4_native_varsize(compressed, len, &cksum); if (cksum.zc_word[0] != checksum) { free(compressed); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt (incorrect checksum)")); return (NULL); } /* uncompress */ void *packed = zfs_alloc(hdl, packed_len); uLongf packed_len_long = packed_len; if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK || packed_len_long != packed_len) { free(packed); free(compressed); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt (decompression failed)")); return (NULL); } /* unpack nvlist */ nvlist_t *nv; int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP); free(packed); free(compressed); if (error != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt (nvlist_unpack failed)")); return (NULL); } return (nv); } int zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd, const char *resume_token) { char errbuf[1024]; char *toname; char *fromname = NULL; uint64_t resumeobj, resumeoff, toguid, fromguid, bytes; zfs_handle_t *zhp; int error = 0; char name[ZFS_MAX_DATASET_NAME_LEN]; enum lzc_send_flags lzc_flags = 0; FILE *fout = (flags->verbose && flags->dryrun) ? 
stdout : stderr; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot resume send")); nvlist_t *resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token); if (resume_nvl == NULL) { /* * zfs_error_aux has already been set by * zfs_send_resume_token_to_nvlist */ return (zfs_error(hdl, EZFS_FAULT, errbuf)); } if (flags->verbose) { (void) fprintf(fout, dgettext(TEXT_DOMAIN, "resume token contents:\n")); nvlist_print(fout, resume_nvl); } if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 || nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 || nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 || nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 || nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "resume token is corrupt")); return (zfs_error(hdl, EZFS_FAULT, errbuf)); } fromguid = 0; (void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid); if (flags->largeblock || nvlist_exists(resume_nvl, "largeblockok")) lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK; if (flags->embed_data || nvlist_exists(resume_nvl, "embedok")) lzc_flags |= LZC_SEND_FLAG_EMBED_DATA; if (flags->compress || nvlist_exists(resume_nvl, "compressok")) lzc_flags |= LZC_SEND_FLAG_COMPRESS; if (flags->raw || nvlist_exists(resume_nvl, "rawok")) lzc_flags |= LZC_SEND_FLAG_RAW; if (guid_to_name(hdl, toname, toguid, B_FALSE, name) != 0) { if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is no longer the same snapshot used in " "the initial send"), toname); } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' used in the initial send no longer exists"), toname); } return (zfs_error(hdl, EZFS_BADPATH, errbuf)); } zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); if (zhp == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "unable to access '%s'"), name); return (zfs_error(hdl, EZFS_BADPATH, errbuf)); } if (fromguid != 0) { if (guid_to_name(hdl, toname, fromguid, B_TRUE, name) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source %#llx no longer exists"), (longlong_t)fromguid); return (zfs_error(hdl, EZFS_BADPATH, errbuf)); } fromname = name; } if (flags->verbose) { uint64_t size = 0; error = lzc_send_space(zhp->zfs_name, fromname, lzc_flags, &size); if (error == 0) size = MAX(0, (int64_t)(size - bytes)); send_print_verbose(fout, zhp->zfs_name, fromname, size, flags->parsable); } if (!flags->dryrun) { progress_arg_t pa = { 0 }; pthread_t tid; /* * If progress reporting is requested, spawn a new thread to * poll ZFS_IOC_SEND_PROGRESS at a regular interval. 
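 */

/*
 * Example (hypothetical caller, not from this change): the token
 * passed to this function is normally read back from the partially
 * received dataset, e.g.
 *
 *	char tok[ZFS_MAXPROPLEN];
 *	(void) zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, tok,
 *	    sizeof (tok), NULL, NULL, 0, B_TRUE);
 *	(void) zfs_send_resume(hdl, &flags, outfd, tok);
 *
 * where "zhp", "flags" and "outfd" are caller state.
 */

/*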
*/ if (flags->progress) { pa.pa_zhp = zhp; pa.pa_fd = outfd; pa.pa_parsable = flags->parsable; error = pthread_create(&tid, NULL, send_progress_thread, &pa); if (error != 0) { zfs_close(zhp); return (error); } } error = lzc_send_resume(zhp->zfs_name, fromname, outfd, lzc_flags, resumeobj, resumeoff); if (flags->progress) { (void) pthread_cancel(tid); (void) pthread_join(tid, NULL); } char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot send '%s'"), zhp->zfs_name); zfs_close(zhp); switch (error) { case 0: return (0); case EACCES: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "source key must be loaded")); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); case EXDEV: case ENOENT: case EDQUOT: case EFBIG: case EIO: case ENOLINK: case ENOSPC: case ENOSTR: case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: zfs_error_aux(hdl, strerror(errno)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, errno, errbuf)); } } zfs_close(zhp); return (error); } /* * Generate a send stream for the dataset identified by the argument zhp. * * The content of the send stream is the snapshot identified by * 'tosnap'. Incremental streams are requested in two ways: * - from the snapshot identified by "fromsnap" (if non-null) or * - from the origin of the dataset identified by zhp, which must * be a clone. In this case, "fromsnap" is null and "fromorigin" * is TRUE. * * The send stream is recursive (i.e. dumps a hierarchy of snapshots) and * uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM) * if "replicate" is set. If "doall" is set, dump all the intermediate * snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall" * case too. If "props" is set, send properties. */ int zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap, sendflags_t *flags, int outfd, snapfilter_cb_t filter_func, void *cb_arg, nvlist_t **debugnvp) { char errbuf[1024]; send_dump_data_t sdd = { 0 }; int err = 0; nvlist_t *fss = NULL; avl_tree_t *fsavl = NULL; static uint64_t holdseq; int spa_version; pthread_t tid = 0; int pipefd[2]; dedup_arg_t dda = { 0 }; int featureflags = 0; FILE *fout; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot send '%s'"), zhp->zfs_name); if (fromsnap && fromsnap[0] == '\0') { zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "zero-length incremental source")); return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); } if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM) { uint64_t version; version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); if (version >= ZPL_VERSION_SA) { featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; } } /* * Start the dedup thread if this is a dedup stream. We do not bother * doing this if this a raw send of an encrypted dataset with dedup off * because normal encrypted blocks won't dedup. 
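 */

/*
 * Note (added for clarity): the dedup path below interposes the
 * cksummer thread between the kernel and the caller's outfd: the raw
 * stream is written to one end of a socketpair (sdd.outfd =
 * pipefd[0]) while the thread reads the other end (dda.inputfd =
 * pipefd[1]) and emits the deduplicated stream on dda.outputfd.
 */

/*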
*/ if (flags->dedup && !flags->dryrun && !(flags->raw && zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF && zfs_prop_get_int(zhp, ZFS_PROP_DEDUP) == ZIO_CHECKSUM_OFF)) { featureflags |= (DMU_BACKUP_FEATURE_DEDUP | DMU_BACKUP_FEATURE_DEDUPPROPS); if ((err = socketpair(AF_UNIX, SOCK_STREAM, 0, pipefd)) != 0) { zfs_error_aux(zhp->zfs_hdl, strerror(errno)); return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED, errbuf)); } dda.outputfd = outfd; dda.inputfd = pipefd[1]; dda.dedup_hdl = zhp->zfs_hdl; if ((err = pthread_create(&tid, NULL, cksummer, &dda)) != 0) { (void) close(pipefd[0]); (void) close(pipefd[1]); zfs_error_aux(zhp->zfs_hdl, strerror(errno)); return (zfs_error(zhp->zfs_hdl, EZFS_THREADCREATEFAILED, errbuf)); } } if (flags->replicate || flags->doall || flags->props || flags->backup) { dmu_replay_record_t drr = { 0 }; char *packbuf = NULL; size_t buflen = 0; zio_cksum_t zc; ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0); if (flags->replicate || flags->props || flags->backup) { nvlist_t *hdrnv; VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0)); if (fromsnap) { VERIFY(0 == nvlist_add_string(hdrnv, "fromsnap", fromsnap)); } VERIFY(0 == nvlist_add_string(hdrnv, "tosnap", tosnap)); if (!flags->replicate) { VERIFY(0 == nvlist_add_boolean(hdrnv, "not_recursive")); } if (flags->raw) { VERIFY(0 == nvlist_add_boolean(hdrnv, "raw")); } err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name, fromsnap, tosnap, flags->replicate, flags->raw, flags->verbose, flags->backup, &fss, &fsavl); if (err) goto err_out; VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss)); err = nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR, 0); if (debugnvp) *debugnvp = hdrnv; else nvlist_free(hdrnv); if (err) goto stderr_out; } if (!flags->dryrun) { /* write first begin record */ drr.drr_type = DRR_BEGIN; drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin. drr_versioninfo, DMU_COMPOUNDSTREAM); DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin. drr_versioninfo, featureflags); if (snprintf(drr.drr_u.drr_begin.drr_toname, sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", zhp->zfs_name, tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) { err = EINVAL; goto stderr_out; } drr.drr_payloadlen = buflen; err = dump_record(&drr, packbuf, buflen, &zc, outfd); free(packbuf); if (err != 0) goto stderr_out; /* write end record */ bzero(&drr, sizeof (drr)); drr.drr_type = DRR_END; drr.drr_u.drr_end.drr_checksum = zc; err = write(outfd, &drr, sizeof (drr)); if (err == -1) { err = errno; goto stderr_out; } err = 0; } } /* dump each stream */ sdd.fromsnap = fromsnap; sdd.tosnap = tosnap; if (tid != 0) sdd.outfd = pipefd[0]; else sdd.outfd = outfd; sdd.replicate = flags->replicate; sdd.doall = flags->doall; sdd.fromorigin = flags->fromorigin; sdd.fss = fss; sdd.fsavl = fsavl; sdd.verbose = flags->verbose; sdd.parsable = flags->parsable; sdd.progress = flags->progress; sdd.dryrun = flags->dryrun; sdd.large_block = flags->largeblock; sdd.embed_data = flags->embed_data; sdd.compress = flags->compress; sdd.raw = flags->raw; sdd.filter_cb = filter_func; sdd.filter_cb_arg = cb_arg; if (debugnvp) sdd.debugnv = *debugnvp; if (sdd.verbose && sdd.dryrun) sdd.std_out = B_TRUE; fout = sdd.std_out ? stdout : stderr; /* * Some flags require that we place user holds on the datasets that are * being sent so they don't get destroyed during the send. We can skip * this step if the pool is imported read-only since the datasets cannot * be destroyed. 
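 */

/*
 * Example (hypothetical, mirroring the code below): the hold tag has
 * the form ".send-<pid>-<seq>", and taking the same holds by hand
 * would look like
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	fnvlist_add_string(holds, "pool/fs@snap", ".send-1234-1");
 *	err = zfs_hold_nvl(zhp, cleanup_fd, holds);
 *
 * Registering the holds against cleanup_fd (an open ZFS_DEV handle)
 * makes them temporary: they are released when the descriptor closes,
 * even if the sending process dies.
 */

/*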
*/ if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp), ZPOOL_PROP_READONLY, NULL) && zfs_spa_version(zhp, &spa_version) == 0 && spa_version >= SPA_VERSION_USERREFS && (flags->doall || flags->replicate)) { ++holdseq; (void) snprintf(sdd.holdtag, sizeof (sdd.holdtag), ".send-%d-%llu", getpid(), (u_longlong_t)holdseq); sdd.cleanup_fd = open(ZFS_DEV, O_RDWR); if (sdd.cleanup_fd < 0) { err = errno; goto stderr_out; } sdd.snapholds = fnvlist_alloc(); } else { sdd.cleanup_fd = -1; sdd.snapholds = NULL; } if (flags->verbose || sdd.snapholds != NULL) { /* * Do a verbose no-op dry run to get all the verbose output * or to gather snapshot holds before generating any data, * then do a non-verbose real run to generate the streams. */ sdd.dryrun = B_TRUE; err = dump_filesystems(zhp, &sdd); if (err != 0) goto stderr_out; if (flags->verbose) { if (flags->parsable) { (void) fprintf(fout, "size\t%llu\n", (longlong_t)sdd.size); } else { char buf[16]; zfs_nicebytes(sdd.size, buf, sizeof (buf)); (void) fprintf(fout, dgettext(TEXT_DOMAIN, "total estimated size is %s\n"), buf); } } /* Ensure no snaps found is treated as an error. */ if (!sdd.seento) { err = ENOENT; goto err_out; } /* Skip the second run if dryrun was requested. */ if (flags->dryrun) goto err_out; if (sdd.snapholds != NULL) { err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds); if (err != 0) goto stderr_out; fnvlist_free(sdd.snapholds); sdd.snapholds = NULL; } sdd.dryrun = B_FALSE; sdd.verbose = B_FALSE; } err = dump_filesystems(zhp, &sdd); fsavl_destroy(fsavl); nvlist_free(fss); /* Ensure no snaps found is treated as an error. */ if (err == 0 && !sdd.seento) err = ENOENT; if (tid != 0) { if (err != 0) (void) pthread_cancel(tid); (void) close(pipefd[0]); (void) pthread_join(tid, NULL); } if (sdd.cleanup_fd != -1) { VERIFY(0 == close(sdd.cleanup_fd)); sdd.cleanup_fd = -1; } if (!flags->dryrun && (flags->replicate || flags->doall || flags->props || flags->backup)) { /* * write final end record. NB: want to do this even if * there was some error, because it might not be totally * failed. */ dmu_replay_record_t drr = { 0 }; drr.drr_type = DRR_END; if (write(outfd, &drr, sizeof (drr)) == -1) { return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); } } return (err || sdd.err); stderr_out: err = zfs_standard_error(zhp->zfs_hdl, err, errbuf); err_out: fsavl_destroy(fsavl); nvlist_free(fss); fnvlist_free(sdd.snapholds); if (sdd.cleanup_fd != -1) VERIFY(0 == close(sdd.cleanup_fd)); if (tid != 0) { (void) pthread_cancel(tid); (void) close(pipefd[0]); (void) pthread_join(tid, NULL); } return (err); } int zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t flags) { int err = 0; libzfs_handle_t *hdl = zhp->zfs_hdl; enum lzc_send_flags lzc_flags = 0; FILE *fout = (flags.verbose && flags.dryrun) ?
stdout : stderr; char errbuf[1024]; if (flags.largeblock) lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK; if (flags.embed_data) lzc_flags |= LZC_SEND_FLAG_EMBED_DATA; if (flags.compress) lzc_flags |= LZC_SEND_FLAG_COMPRESS; if (flags.raw) lzc_flags |= LZC_SEND_FLAG_RAW; if (flags.verbose) { uint64_t size = 0; err = lzc_send_space(zhp->zfs_name, from, lzc_flags, &size); if (err == 0) { send_print_verbose(fout, zhp->zfs_name, from, size, flags.parsable); } else { (void) fprintf(stderr, "Cannot estimate send size: " "%s\n", strerror(errno)); } } if (flags.dryrun) return (err); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot send '%s'"), zhp->zfs_name); err = lzc_send(zhp->zfs_name, from, fd, lzc_flags); if (err != 0) { switch (errno) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not an earlier snapshot from the same fs")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); case ENOENT: case ESRCH: if (lzc_exists(zhp->zfs_name)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source (%s) does not exist"), from); } return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EACCES: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset key must be loaded")); return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf)); case EBUSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "target is busy; if a filesystem, " "it must not be mounted")); return (zfs_error(hdl, EZFS_BUSY, errbuf)); case EDQUOT: case EFBIG: case EIO: case ENOLINK: case ENOSPC: case ENOSTR: case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: zfs_error_aux(hdl, strerror(errno)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, errno, errbuf)); } } return (err != 0); } /* * Routines specific to "zfs recv" */ static int recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen, boolean_t byteswap, zio_cksum_t *zc) { char *cp = buf; int rv; int len = ilen; assert(ilen <= SPA_MAXBLOCKSIZE); do { rv = read(fd, cp, len); cp += rv; len -= rv; } while (rv > 0); if (rv < 0 || len != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to read from stream")); return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN, "cannot receive"))); } if (zc) { if (byteswap) fletcher_4_incremental_byteswap(buf, ilen, zc); else fletcher_4_incremental_native(buf, ilen, zc); } return (0); } static int recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp, boolean_t byteswap, zio_cksum_t *zc) { char *buf; int err; buf = zfs_alloc(hdl, len); if (buf == NULL) return (ENOMEM); err = recv_read(hdl, fd, buf, len, byteswap, zc); if (err != 0) { free(buf); return (err); } err = nvlist_unpack(buf, len, nvp, 0); free(buf); if (err != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (malformed nvlist)")); return (EINVAL); } return (0); } /* * Returns the grand origin (origin of origin of origin...) of a given handle. * If this dataset is not a clone, it simply returns a copy of the original * handle. */ static zfs_handle_t * recv_open_grand_origin(zfs_handle_t *zhp) { char origin[ZFS_MAX_DATASET_NAME_LEN]; zprop_source_t src; zfs_handle_t *ozhp = zfs_handle_dup(zhp); while (ozhp != NULL) { if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin, sizeof (origin), &src, NULL, 0, B_FALSE) != 0) break; (void) zfs_close(ozhp); ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM); } return (ozhp); } static int recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname) { int err; zfs_handle_t *ozhp = NULL; /* * Attempt to rename the dataset. 
If it fails with EACCES we have * attempted to rename the dataset outside of its encryption root. * Force the dataset to become an encryption root and try again. */ err = lzc_rename(name, newname); if (err == EACCES) { ozhp = recv_open_grand_origin(zhp); if (ozhp == NULL) { err = ENOENT; goto out; } err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0); if (err != 0) goto out; err = lzc_rename(name, newname); } out: if (ozhp != NULL) zfs_close(ozhp); return (err); } static int recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname, int baselen, char *newname, recvflags_t *flags) { static int seq; int err; prop_changelist_t *clp = NULL; zfs_handle_t *zhp = NULL; zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); if (zhp == NULL) { err = -1; goto out; } clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, flags->force ? MS_FORCE : 0); if (clp == NULL) { err = -1; goto out; } err = changelist_prefix(clp); if (err) goto out; if (tryname) { (void) strcpy(newname, tryname); if (flags->verbose) { (void) printf("attempting rename %s to %s\n", name, newname); } err = recv_rename_impl(zhp, name, newname); if (err == 0) changelist_rename(clp, name, tryname); } else { err = ENOENT; } if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) { seq++; (void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN, "%.*srecv-%u-%u", baselen, name, getpid(), seq); if (flags->verbose) { (void) printf("failed - trying rename %s to %s\n", name, newname); } err = recv_rename_impl(zhp, name, newname); if (err == 0) changelist_rename(clp, name, newname); if (err && flags->verbose) { (void) printf("failed (%u) - " "will try again on next pass\n", errno); } err = EAGAIN; } else if (flags->verbose) { if (err == 0) (void) printf("success\n"); else (void) printf("failed (%u)\n", errno); } (void) changelist_postfix(clp); out: if (clp != NULL) changelist_free(clp); if (zhp != NULL) zfs_close(zhp); return (err); } static int recv_promote(libzfs_handle_t *hdl, const char *fsname, const char *origin_fsname, recvflags_t *flags) { int err; zfs_cmd_t zc = {"\0"}; zfs_handle_t *zhp = NULL, *ozhp = NULL; if (flags->verbose) (void) printf("promoting %s\n", fsname); (void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value)); (void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name)); /* * Attempt to promote the dataset. If it fails with EACCES the * promotion would cause this dataset to leave its encryption root. * Force the origin to become an encryption root and try again. */ err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); if (err == EACCES) { zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET); if (zhp == NULL) { err = -1; goto out; } ozhp = recv_open_grand_origin(zhp); if (ozhp == NULL) { err = -1; goto out; } err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0); if (err != 0) goto out; err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); } out: if (zhp != NULL) zfs_close(zhp); if (ozhp != NULL) zfs_close(ozhp); return (err); } static int recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen, char *newname, recvflags_t *flags) { int err = 0; prop_changelist_t *clp; zfs_handle_t *zhp; boolean_t defer = B_FALSE; int spa_version; zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); if (zhp == NULL) return (-1); clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, flags->force ? 
MS_FORCE : 0); zfs_type_t type = zfs_get_type(zhp); /* remembered: zhp is closed before the type is used again below */ if (type == ZFS_TYPE_SNAPSHOT && zfs_spa_version(zhp, &spa_version) == 0 && spa_version >= SPA_VERSION_USERREFS) defer = B_TRUE; zfs_close(zhp); if (clp == NULL) return (-1); err = changelist_prefix(clp); if (err) return (err); if (flags->verbose) (void) printf("attempting destroy %s\n", name); if (type == ZFS_TYPE_SNAPSHOT) { nvlist_t *nv = fnvlist_alloc(); fnvlist_add_boolean(nv, name); err = lzc_destroy_snaps(nv, defer, NULL); fnvlist_free(nv); } else { err = lzc_destroy(name); } if (err == 0) { if (flags->verbose) (void) printf("success\n"); changelist_remove(clp, name); } (void) changelist_postfix(clp); changelist_free(clp); /* * Deferred destroy might destroy the snapshot or only mark it to be * destroyed later, and it returns success in either case. */ if (err != 0 || (defer && zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT))) { err = recv_rename(hdl, name, NULL, baselen, newname, flags); } return (err); } typedef struct guid_to_name_data { uint64_t guid; boolean_t bookmark_ok; char *name; char *skip; } guid_to_name_data_t; static int guid_to_name_cb(zfs_handle_t *zhp, void *arg) { guid_to_name_data_t *gtnd = arg; const char *slash; int err; if (gtnd->skip != NULL && (slash = strrchr(zhp->zfs_name, '/')) != NULL && strcmp(slash + 1, gtnd->skip) == 0) { zfs_close(zhp); return (0); } if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid) { (void) strcpy(gtnd->name, zhp->zfs_name); zfs_close(zhp); return (EEXIST); } err = zfs_iter_children(zhp, guid_to_name_cb, gtnd); if (err != EEXIST && gtnd->bookmark_ok) err = zfs_iter_bookmarks(zhp, guid_to_name_cb, gtnd); zfs_close(zhp); return (err); } /* * Attempt to find the local dataset associated with this guid. In the case of * multiple matches, we attempt to find the "best" match by searching * progressively larger portions of the hierarchy. This allows one to send a * tree of datasets individually and guarantee that we will find the source * guid within that hierarchy, even if there are multiple matches elsewhere. */ static int guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid, boolean_t bookmark_ok, char *name) { char pname[ZFS_MAX_DATASET_NAME_LEN]; guid_to_name_data_t gtnd; gtnd.guid = guid; gtnd.bookmark_ok = bookmark_ok; gtnd.name = name; gtnd.skip = NULL; /* * Search progressively larger portions of the hierarchy, starting * with the filesystem specified by 'parent'. This will * select the "most local" version of the origin snapshot in the case * that there are multiple matching snapshots in the system. */ (void) strlcpy(pname, parent, sizeof (pname)); char *cp = strrchr(pname, '@'); if (cp == NULL) cp = strchr(pname, '\0'); for (; cp != NULL; cp = strrchr(pname, '/')) { /* Chop off the last component and open the parent */ *cp = '\0'; zfs_handle_t *zhp = make_dataset_handle(hdl, pname); if (zhp == NULL) continue; int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd); if (err != EEXIST) err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd); if (err != EEXIST && bookmark_ok) err = zfs_iter_bookmarks(zhp, guid_to_name_cb, &gtnd); zfs_close(zhp); if (err == EEXIST) return (0); /* * Remember the last portion of the dataset so we skip it next * time through (as we've already searched that portion of the * hierarchy). */ gtnd.skip = strrchr(pname, '/') + 1; } return (ENOENT); }
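
/*
 * Example (hypothetical datasets): if both "tank/a" and "tank/b"
 * contain a snapshot with guid G, then guid_to_name(hdl,
 * "tank/a/child", G, B_FALSE, name) searches "tank/a/child" first,
 * then "tank/a" (skipping "child"), then "tank" (skipping "a"), so
 * the match under "tank/a" is preferred over the one under "tank/b".
 */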
/* * Return -1 if guid1 was created before guid2, +1 if it was created after, * and 0 if the creation TXGs match. As special cases, a guid2 of 0 compares * as equal and a guid1 of 0 compares as created after. */ static int created_before(libzfs_handle_t *hdl, avl_tree_t *avl, uint64_t guid1, uint64_t guid2) { nvlist_t *nvfs; char *fsname = NULL, *snapname = NULL; char buf[ZFS_MAX_DATASET_NAME_LEN]; int rv; zfs_handle_t *guid1hdl, *guid2hdl; uint64_t create1, create2; if (guid2 == 0) return (0); if (guid1 == 0) return (1); nvfs = fsavl_find(avl, guid1, &snapname); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); if (guid1hdl == NULL) return (-1); nvfs = fsavl_find(avl, guid2, &snapname); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); if (guid2hdl == NULL) { zfs_close(guid1hdl); return (-1); } create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG); create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG); if (create1 < create2) rv = -1; else if (create1 > create2) rv = +1; else rv = 0; zfs_close(guid1hdl); zfs_close(guid2hdl); return (rv); } /* * This function reestablishes the hierarchy of encryption roots after a * recursive incremental receive has completed. This must be done after the * second call to recv_incremental_replication() has renamed and promoted all * sent datasets to their final locations in the dataset hierarchy. */ static int recv_fix_encryption_heirarchy(libzfs_handle_t *hdl, const char *destname, nvlist_t *stream_nv, avl_tree_t *stream_avl) { int err; nvpair_t *fselem = NULL; nvlist_t *stream_fss; char *cp; char top_zfs[ZFS_MAX_DATASET_NAME_LEN]; (void) strcpy(top_zfs, destname); cp = strrchr(top_zfs, '@'); if (cp != NULL) *cp = '\0'; VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss", &stream_fss)); while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) { zfs_handle_t *zhp = NULL; uint64_t crypt; nvlist_t *snaps, *props, *stream_nvfs = NULL; nvpair_t *snapel = NULL; boolean_t is_encroot, is_clone, stream_encroot; char *cp; char *stream_keylocation = NULL; char keylocation[MAXNAMELEN]; char fsname[ZFS_MAX_DATASET_NAME_LEN]; keylocation[0] = '\0'; VERIFY(0 == nvpair_value_nvlist(fselem, &stream_nvfs)); VERIFY(0 == nvlist_lookup_nvlist(stream_nvfs, "snaps", &snaps)); VERIFY(0 == nvlist_lookup_nvlist(stream_nvfs, "props", &props)); stream_encroot = nvlist_exists(stream_nvfs, "is_encroot"); /* find a snapshot from the stream that exists locally */ err = ENOENT; while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) { uint64_t guid; VERIFY(0 == nvpair_value_uint64(snapel, &guid)); err = guid_to_name(hdl, destname, guid, B_FALSE, fsname); if (err == 0) break; } if (err != 0) continue; cp = strchr(fsname, '@'); if (cp != NULL) *cp = '\0'; zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET); if (zhp == NULL) { err = ENOENT; goto error; } crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION); is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0'; (void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL); /* we don't need to do anything for unencrypted filesystems */ if (crypt == ZIO_CRYPT_OFF) { zfs_close(zhp); continue; } /* * If the dataset is flagged as an encryption root, was not * received as a clone and is not currently an encryption root, * force it to become one. Fix up the keylocation if necessary.
*/ if (stream_encroot) { if (!is_clone && !is_encroot) { err = lzc_change_key(fsname, DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0); if (err != 0) { zfs_close(zhp); goto error; } } VERIFY(0 == nvlist_lookup_string(props, zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &stream_keylocation)); /* * Refresh the properties in case the call to * lzc_change_key() changed the value. */ zfs_refresh_properties(zhp); err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION, keylocation, sizeof (keylocation), NULL, NULL, 0, B_TRUE); if (err != 0) { zfs_close(zhp); goto error; } if (strcmp(keylocation, stream_keylocation) != 0) { err = zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_KEYLOCATION), stream_keylocation); if (err != 0) { zfs_close(zhp); goto error; } } } /* * If the dataset is not flagged as an encryption root and is * currently an encryption root, force it to inherit from its * parent. The root of a raw send should never be * force-inherited. */ if (!stream_encroot && is_encroot && strcmp(top_zfs, fsname) != 0) { err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT, NULL, NULL, 0); if (err != 0) { zfs_close(zhp); goto error; } } zfs_close(zhp); } return (0); error: return (err); } static int recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs, recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl, nvlist_t *renamed) { nvlist_t *local_nv, *deleted = NULL; avl_tree_t *local_avl; nvpair_t *fselem, *nextfselem; char *fromsnap; char newname[ZFS_MAX_DATASET_NAME_LEN]; char guidname[32]; int error; boolean_t needagain, progress, recursive; char *s1, *s2; VERIFY(0 == nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap)); recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); if (flags->dryrun) return (0); again: needagain = progress = B_FALSE; VERIFY(0 == nvlist_alloc(&deleted, NV_UNIQUE_NAME, 0)); if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL, recursive, B_TRUE, B_FALSE, B_FALSE, &local_nv, &local_avl)) != 0) return (error); /* * Process deletes and renames */ for (fselem = nvlist_next_nvpair(local_nv, NULL); fselem; fselem = nextfselem) { nvlist_t *nvfs, *snaps; nvlist_t *stream_nvfs = NULL; nvpair_t *snapelem, *nextsnapelem; uint64_t fromguid = 0; uint64_t originguid = 0; uint64_t stream_originguid = 0; uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid; char *fsname, *stream_fsname; nextfselem = nvlist_next_nvpair(local_nv, fselem); VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); VERIFY(0 == nvlist_lookup_uint64(nvfs, "parentfromsnap", &parent_fromsnap_guid)); (void) nvlist_lookup_uint64(nvfs, "origin", &originguid); /* * First find the stream's fs, so we can check for * a different origin (due to "zfs promote") */ for (snapelem = nvlist_next_nvpair(snaps, NULL); snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) { uint64_t thisguid; VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); stream_nvfs = fsavl_find(stream_avl, thisguid, NULL); if (stream_nvfs != NULL) break; } /* check for promote */ (void) nvlist_lookup_uint64(stream_nvfs, "origin", &stream_originguid); if (stream_nvfs && originguid != stream_originguid) { switch (created_before(hdl, local_avl, stream_originguid, originguid)) { case 1: { /* promote it! 
*/ nvlist_t *origin_nvfs; char *origin_fsname; origin_nvfs = fsavl_find(local_avl, originguid, NULL); VERIFY(0 == nvlist_lookup_string(origin_nvfs, "name", &origin_fsname)); error = recv_promote(hdl, fsname, origin_fsname, flags); if (error == 0) progress = B_TRUE; break; } default: break; case -1: fsavl_destroy(local_avl); nvlist_free(local_nv); return (-1); } /* * We had/have the wrong origin, therefore our * list of snapshots is wrong. Need to handle * them on the next pass. */ needagain = B_TRUE; continue; } for (snapelem = nvlist_next_nvpair(snaps, NULL); snapelem; snapelem = nextsnapelem) { uint64_t thisguid; char *stream_snapname; nvlist_t *found, *props; nextsnapelem = nvlist_next_nvpair(snaps, snapelem); VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); found = fsavl_find(stream_avl, thisguid, &stream_snapname); /* check for delete */ if (found == NULL) { char name[ZFS_MAX_DATASET_NAME_LEN]; if (!flags->force) continue; (void) snprintf(name, sizeof (name), "%s@%s", fsname, nvpair_name(snapelem)); error = recv_destroy(hdl, name, strlen(fsname)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; sprintf(guidname, "%llu", (u_longlong_t)thisguid); nvlist_add_boolean(deleted, guidname); continue; } stream_nvfs = found; if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops", &props) && 0 == nvlist_lookup_nvlist(props, stream_snapname, &props)) { zfs_cmd_t zc = {"\0"}; zc.zc_cookie = B_TRUE; /* received */ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", fsname, nvpair_name(snapelem)); if (zcmd_write_src_nvlist(hdl, &zc, props) == 0) { (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); zcmd_free_nvlists(&zc); } } /* check for different snapname */ if (strcmp(nvpair_name(snapelem), stream_snapname) != 0) { char name[ZFS_MAX_DATASET_NAME_LEN]; char tryname[ZFS_MAX_DATASET_NAME_LEN]; (void) snprintf(name, sizeof (name), "%s@%s", fsname, nvpair_name(snapelem)); (void) snprintf(tryname, sizeof (name), "%s@%s", fsname, stream_snapname); error = recv_rename(hdl, name, tryname, strlen(fsname)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; } if (strcmp(stream_snapname, fromsnap) == 0) fromguid = thisguid; } /* check for delete */ if (stream_nvfs == NULL) { if (!flags->force) continue; error = recv_destroy(hdl, fsname, strlen(tofs)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; sprintf(guidname, "%llu", (u_longlong_t)parent_fromsnap_guid); nvlist_add_boolean(deleted, guidname); continue; } if (fromguid == 0) { if (flags->verbose) { (void) printf("local fs %s does not have " "fromsnap (%s in stream); must have " "been deleted locally; ignoring\n", fsname, fromsnap); } continue; } VERIFY(0 == nvlist_lookup_string(stream_nvfs, "name", &stream_fsname)); VERIFY(0 == nvlist_lookup_uint64(stream_nvfs, "parentfromsnap", &stream_parent_fromsnap_guid)); s1 = strrchr(fsname, '/'); s2 = strrchr(stream_fsname, '/'); /* * Check if we're going to rename based on parent guid change * and the current parent guid was also deleted. If it was then * rename will fail and is likely unneeded, so avoid this and * force an early retry to determine the new * parent_fromsnap_guid. */ if (stream_parent_fromsnap_guid != 0 && parent_fromsnap_guid != 0 && stream_parent_fromsnap_guid != parent_fromsnap_guid) { sprintf(guidname, "%llu", (u_longlong_t)parent_fromsnap_guid); if (nvlist_exists(deleted, guidname)) { progress = B_TRUE; needagain = B_TRUE; goto doagain; } } /* * Check for rename. 
If the exact receive path is specified, it * does not count as a rename, but we still need to check the * datasets beneath it. */ if ((stream_parent_fromsnap_guid != 0 && parent_fromsnap_guid != 0 && stream_parent_fromsnap_guid != parent_fromsnap_guid) || ((flags->isprefix || strcmp(tofs, fsname) != 0) && (s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) { nvlist_t *parent; char tryname[ZFS_MAX_DATASET_NAME_LEN]; parent = fsavl_find(local_avl, stream_parent_fromsnap_guid, NULL); /* * NB: parent might not be found if we used the * tosnap for stream_parent_fromsnap_guid, * because the parent is a newly-created fs; * we'll be able to rename it after we recv the * new fs. */ if (parent != NULL) { char *pname; VERIFY(0 == nvlist_lookup_string(parent, "name", &pname)); (void) snprintf(tryname, sizeof (tryname), "%s%s", pname, strrchr(stream_fsname, '/')); } else { tryname[0] = '\0'; if (flags->verbose) { (void) printf("local fs %s new parent " "not found\n", fsname); } } newname[0] = '\0'; error = recv_rename(hdl, fsname, tryname, strlen(tofs)+1, newname, flags); if (renamed != NULL && newname[0] != '\0') { VERIFY(0 == nvlist_add_boolean(renamed, newname)); } if (error) needagain = B_TRUE; else progress = B_TRUE; } } doagain: fsavl_destroy(local_avl); nvlist_free(local_nv); nvlist_free(deleted); if (needagain && progress) { /* do another pass to fix up temporary names */ if (flags->verbose) (void) printf("another pass:\n"); goto again; } return (needagain || error != 0); } static int zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname, recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc, char **top_zfs, int cleanup_fd, uint64_t *action_handlep, nvlist_t *cmdprops) { nvlist_t *stream_nv = NULL; avl_tree_t *stream_avl = NULL; char *fromsnap = NULL; char *sendsnap = NULL; char *cp; char tofs[ZFS_MAX_DATASET_NAME_LEN]; char sendfs[ZFS_MAX_DATASET_NAME_LEN]; char errbuf[1024]; dmu_replay_record_t drre; int error; boolean_t anyerr = B_FALSE; boolean_t softerr = B_FALSE; boolean_t recursive, raw; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); assert(drr->drr_type == DRR_BEGIN); assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC); assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) == DMU_COMPOUNDSTREAM); /* * Read in the nvlist from the stream. */ if (drr->drr_payloadlen != 0) { error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen, &stream_nv, flags->byteswap, zc); if (error) { error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } } recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0); if (recursive && strchr(destname, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot specify snapshot name for multi-snapshot stream")); error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } /* * Read in the end record and verify checksum. 
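 */

/*
 * Note (added for clarity): a compound stream is framed as a
 * DRR_BEGIN record carrying the packed "fss" nvlist, immediately
 * followed by a DRR_END whose checksum covers just that header; the
 * per-dataset substreams come after it, and the stream as a whole is
 * terminated by the final zeroed DRR_END written at the end of
 * zfs_send().
 */

/*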
*/ if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre), flags->byteswap, NULL))) goto out; if (flags->byteswap) { drre.drr_type = BSWAP_32(drre.drr_type); drre.drr_u.drr_end.drr_checksum.zc_word[0] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]); drre.drr_u.drr_end.drr_checksum.zc_word[1] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]); drre.drr_u.drr_end.drr_checksum.zc_word[2] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]); drre.drr_u.drr_end.drr_checksum.zc_word[3] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]); } if (drre.drr_type != DRR_END) { error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incorrect header checksum")); error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } (void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap); if (drr->drr_payloadlen != 0) { nvlist_t *stream_fss; VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss", &stream_fss)); if ((stream_avl = fsavl_create(stream_fss)) == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "couldn't allocate avl tree")); error = zfs_error(hdl, EZFS_NOMEM, errbuf); goto out; } if (fromsnap != NULL && recursive) { nvlist_t *renamed = NULL; nvpair_t *pair = NULL; (void) strlcpy(tofs, destname, sizeof (tofs)); if (flags->isprefix) { struct drr_begin *drrb = &drr->drr_u.drr_begin; int i; if (flags->istail) { cp = strrchr(drrb->drr_toname, '/'); if (cp == NULL) { (void) strlcat(tofs, "/", sizeof (tofs)); i = 0; } else { i = (cp - drrb->drr_toname); } } else { i = strcspn(drrb->drr_toname, "/@"); } /* zfs_receive_one() will create_parents() */ (void) strlcat(tofs, &drrb->drr_toname[i], sizeof (tofs)); *strchr(tofs, '@') = '\0'; } if (!flags->dryrun && !flags->nomount) { VERIFY(0 == nvlist_alloc(&renamed, NV_UNIQUE_NAME, 0)); } softerr = recv_incremental_replication(hdl, tofs, flags, stream_nv, stream_avl, renamed); /* Unmount renamed filesystems before receiving. */ while ((pair = nvlist_next_nvpair(renamed, pair)) != NULL) { zfs_handle_t *zhp; prop_changelist_t *clp = NULL; zhp = zfs_open(hdl, nvpair_name(pair), ZFS_TYPE_FILESYSTEM); if (zhp != NULL) { clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, 0); zfs_close(zhp); if (clp != NULL) { softerr |= changelist_prefix(clp); changelist_free(clp); } } } nvlist_free(renamed); } } /* * Get the fs specified by the first path in the stream (the top level * specified by 'zfs send') and pass it to each invocation of * zfs_receive_one(). */ (void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname, sizeof (sendfs)); if ((cp = strchr(sendfs, '@')) != NULL) { *cp = '\0'; /* * Find the "sendsnap", the final snapshot in a replication * stream. zfs_receive_one() handles certain errors * differently, depending on if the contained stream is the * last one or not. */ sendsnap = (cp + 1); } /* Finally, receive each contained stream */ do { /* * we should figure out if it has a recoverable * error, in which case do a recv_skip() and drive on. * Note, if we fail due to already having this guid, * zfs_receive_one() will take care of it (ie, * recv_skip() and return 0). */ error = zfs_receive_impl(hdl, destname, NULL, flags, fd, sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd, action_handlep, sendsnap, cmdprops); if (error == ENODATA) { error = 0; break; } anyerr |= error; } while (error == 0); if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) { /* * Now that we have the fs's they sent us, try the * renames again. 
*/ softerr = recv_incremental_replication(hdl, tofs, flags, stream_nv, stream_avl, NULL); } if (raw && softerr == 0) { softerr = recv_fix_encryption_heirarchy(hdl, destname, stream_nv, stream_avl); } out: fsavl_destroy(stream_avl); nvlist_free(stream_nv); if (softerr) error = -2; if (anyerr) error = -1; return (error); } static void trunc_prop_errs(int truncated) { ASSERT(truncated != 0); if (truncated == 1) (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "1 more property could not be set\n")); else (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "%d more properties could not be set\n"), truncated); } static int recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap) { dmu_replay_record_t *drr; void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE); char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive:")); /* XXX would be great to use lseek if possible... */ drr = buf; while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t), byteswap, NULL) == 0) { if (byteswap) drr->drr_type = BSWAP_32(drr->drr_type); switch (drr->drr_type) { case DRR_BEGIN: if (drr->drr_payloadlen != 0) { (void) recv_read(hdl, fd, buf, drr->drr_payloadlen, B_FALSE, NULL); } break; case DRR_END: free(buf); return (0); case DRR_OBJECT: if (byteswap) { drr->drr_u.drr_object.drr_bonuslen = BSWAP_32(drr->drr_u.drr_object. drr_bonuslen); } (void) recv_read(hdl, fd, buf, P2ROUNDUP(drr->drr_u.drr_object.drr_bonuslen, 8), B_FALSE, NULL); break; case DRR_WRITE: if (byteswap) { drr->drr_u.drr_write.drr_logical_size = BSWAP_64( drr->drr_u.drr_write.drr_logical_size); drr->drr_u.drr_write.drr_compressed_size = BSWAP_64( drr->drr_u.drr_write.drr_compressed_size); } uint64_t payload_size = DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write); (void) recv_read(hdl, fd, buf, payload_size, B_FALSE, NULL); break; case DRR_SPILL: if (byteswap) { drr->drr_u.drr_spill.drr_length = BSWAP_64(drr->drr_u.drr_spill.drr_length); } (void) recv_read(hdl, fd, buf, drr->drr_u.drr_spill.drr_length, B_FALSE, NULL); break; case DRR_WRITE_EMBEDDED: if (byteswap) { drr->drr_u.drr_write_embedded.drr_psize = BSWAP_32(drr->drr_u.drr_write_embedded. 
drr_psize); } (void) recv_read(hdl, fd, buf, P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize, 8), B_FALSE, NULL); break; case DRR_WRITE_BYREF: case DRR_FREEOBJECTS: case DRR_FREE: break; default: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid record type")); free(buf); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } } free(buf); return (-1); } static void recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap, boolean_t resumable) { char target_fs[ZFS_MAX_DATASET_NAME_LEN]; zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "checksum mismatch or incomplete stream")); if (!resumable) return; (void) strlcpy(target_fs, target_snap, sizeof (target_fs)); *strchr(target_fs, '@') = '\0'; zfs_handle_t *zhp = zfs_open(hdl, target_fs, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return; char token_buf[ZFS_MAXPROPLEN]; int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE); if (error == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "checksum mismatch or incomplete stream.\n" "Partially received snapshot is saved.\n" "A resuming stream can be generated on the sending " "system by running:\n" " zfs send -t %s"), token_buf); } zfs_close(zhp); } /* * Prepare a new nvlist of properties that are to override (-o) or be excluded * (-x) from the received dataset * recvprops: received properties from the send stream * cmdprops: raw input properties from command line * origprops: properties, both locally-set and received, currently set on the * target dataset if it exists, NULL otherwise. * oxprops: valid output override (-o) and excluded (-x) properties */ static int zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type, char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs, boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops, nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out, uint_t *wkeylen_out, const char *errbuf) { nvpair_t *nvp; nvlist_t *oprops, *voprops; zfs_handle_t *zhp = NULL; zpool_handle_t *zpool_hdl = NULL; char *cp; int ret = 0; char namebuf[ZFS_MAX_DATASET_NAME_LEN]; if (nvlist_empty(cmdprops)) return (0); /* No properties to override or exclude */ *oxprops = fnvlist_alloc(); oprops = fnvlist_alloc(); strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN); /* * Get our dataset handle. The target dataset may not exist yet. 
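 */

/*
 * Example (hypothetical caller, not from this change): in "cmdprops"
 * an override arrives as a string nvpair and an exclusion as a
 * boolean nvpair, i.e. the equivalent of "-o compression=on -x atime"
 * is
 *
 *	nvlist_t *cmdprops = fnvlist_alloc();
 *	fnvlist_add_string(cmdprops, "compression", "on");
 *	fnvlist_add_boolean(cmdprops, "atime");
 *
 * which the first iteration below dispatches on nvpair_type().
 */

/*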
*/ if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) { zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET); if (zhp == NULL) { ret = -1; goto error; } } /* open the zpool handle */ cp = strchr(namebuf, '/'); if (cp != NULL) *cp = '\0'; zpool_hdl = zpool_open(hdl, namebuf); if (zpool_hdl == NULL) { ret = -1; goto error; } /* restore namebuf to match fsname for later use */ if (cp != NULL) *cp = '/'; /* * first iteration: process excluded (-x) properties now and gather * added (-o) properties to be later processed by zfs_valid_proplist() */ nvp = NULL; while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) { const char *name = nvpair_name(nvp); zfs_prop_t prop = zfs_name_to_prop(name); /* "origin" is processed separately, don't handle it here */ if (prop == ZFS_PROP_ORIGIN) continue; /* * we're trying to override or exclude a property that does not * make sense for this type of dataset, but we don't want to * fail if the receive is recursive: this comes in handy when * the send stream contains, for instance, a child ZVOL and * we're trying to receive it with "-o atime=on" */ if (!zfs_prop_valid_for_type(prop, type, B_FALSE) && !zfs_prop_user(name)) { if (recursive) continue; zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' does not apply to datasets of this " "type"), name); ret = zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } /* raw streams can't override encryption properties */ if ((zfs_prop_encryption_key_param(prop) || prop == ZFS_PROP_ENCRYPTION) && (raw || !newfs)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "encryption property '%s' cannot " "be set or excluded for raw or incremental " "streams."), name); ret = zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } switch (nvpair_type(nvp)) { case DATA_TYPE_BOOLEAN: /* -x property */ /* * DATA_TYPE_BOOLEAN is the way we're asked to "exclude" * a property: this is done by forcing an explicit * inherit on the destination so the effective value is * not the one we received from the send stream. * We do this only if the property is not already * locally-set, in which case its value will take * priority over the received anyway. */ if (nvlist_exists(origprops, name)) { nvlist_t *attrs; attrs = fnvlist_lookup_nvlist(origprops, name); if (strcmp(fnvlist_lookup_string(attrs, ZPROP_SOURCE), ZPROP_SOURCE_VAL_RECVD) != 0) continue; } /* * We can't force an explicit inherit on non-inheritable * properties: if we're asked to exclude this kind of * values we remove them from "recvprops" input nvlist. */ if (!zfs_prop_inheritable(prop) && !zfs_prop_user(name) && /* can be inherited too */ nvlist_exists(recvprops, name)) fnvlist_remove(recvprops, name); else fnvlist_add_nvpair(*oxprops, nvp); break; case DATA_TYPE_STRING: /* -o property=value */ fnvlist_add_nvpair(oprops, nvp); break; default: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' must be a string or boolean"), name); ret = zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } } if (toplevel) { /* convert override strings properties to native */ if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET, oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) { ret = zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } /* * zfs_crypto_create() requires the parent name. Get it * by truncating the fsname copy stored in namebuf. 
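* (namebuf was restored to the full fsname above, so cutting at the last '/' leaves the parent dataset name.)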
*/ cp = strrchr(namebuf, '/'); if (cp != NULL) *cp = '\0'; if (!raw && zfs_crypto_create(hdl, namebuf, voprops, NULL, B_FALSE, wkeydata_out, wkeylen_out) != 0) { fnvlist_free(voprops); ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf); goto error; } /* second pass: process "-o" properties */ fnvlist_merge(*oxprops, voprops); fnvlist_free(voprops); } else { /* override props on child dataset are inherited */ nvp = NULL; while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) { const char *name = nvpair_name(nvp); fnvlist_add_boolean(*oxprops, name); } } error: if (zhp != NULL) zfs_close(zhp); if (zpool_hdl != NULL) zpool_close(zpool_hdl); fnvlist_free(oprops); return (ret); } /* * Restores a backup of tosnap from the file descriptor specified by infd. */ static int zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr, dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd, uint64_t *action_handlep, const char *finalsnap, nvlist_t *cmdprops) { time_t begin_time; int ioctl_err, ioctl_errno, err; char *cp; struct drr_begin *drrb = &drr->drr_u.drr_begin; char errbuf[1024]; const char *chopprefix; boolean_t newfs = B_FALSE; boolean_t stream_wantsnewfs; boolean_t newprops = B_FALSE; uint64_t read_bytes = 0; uint64_t errflags = 0; uint64_t parent_snapguid = 0; prop_changelist_t *clp = NULL; nvlist_t *snapprops_nvlist = NULL; zprop_errflags_t prop_errflags; nvlist_t *prop_errors = NULL; boolean_t recursive; char *snapname = NULL; char destsnap[MAXPATHLEN * 2]; char origin[MAXNAMELEN]; char name[MAXPATHLEN]; char tmp_keylocation[MAXNAMELEN]; nvlist_t *rcvprops = NULL; /* props received from the send stream */ nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */ nvlist_t *origprops = NULL; /* original props (if destination exists) */ zfs_type_t type; boolean_t toplevel = B_FALSE; boolean_t zoned = B_FALSE; boolean_t hastoken = B_FALSE; uint8_t *wkeydata = NULL; uint_t wkeylen = 0; begin_time = time(NULL); bzero(origin, MAXNAMELEN); bzero(tmp_keylocation, MAXNAMELEN); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); if (stream_avl != NULL) { char *keylocation = NULL; nvlist_t *lookup = NULL; nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid, &snapname); (void) nvlist_lookup_uint64(fs, "parentfromsnap", &parent_snapguid); err = nvlist_lookup_nvlist(fs, "props", &rcvprops); if (err) { VERIFY(0 == nvlist_alloc(&rcvprops, NV_UNIQUE_NAME, 0)); newprops = B_TRUE; } /* * The keylocation property may only be set on encryption roots, * but this dataset might not become an encryption root until * recv_fix_encryption_heirarchy() is called. That function * will fixup the keylocation anyway, so we temporarily unset * the keylocation for now to avoid any errors from the receive * ioctl. 
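* The saved value in tmp_keylocation is added back to rcvprops at the 'out' label below.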
*/ err = nvlist_lookup_string(rcvprops, zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation); if (err == 0) { strcpy(tmp_keylocation, keylocation); (void) nvlist_remove_all(rcvprops, zfs_prop_to_name(ZFS_PROP_KEYLOCATION)); } if (flags->canmountoff) { VERIFY(0 == nvlist_add_uint64(rcvprops, zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0)); } if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) { VERIFY(0 == nvlist_lookup_nvlist(lookup, snapname, &snapprops_nvlist)); } } cp = NULL; /* * Determine how much of the snapshot name stored in the stream * we are going to tack on to the name they specified on the * command line, and how much we are going to chop off. * * If they specified a snapshot, chop the entire name stored in * the stream. */ if (flags->istail) { /* * A filesystem was specified with -e. We want to tack on only * the tail of the sent snapshot path. */ if (strchr(tosnap, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "argument - snapshot not allowed with -e")); err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf); goto out; } chopprefix = strrchr(sendfs, '/'); if (chopprefix == NULL) { /* * The tail is the poolname, so we need to * prepend a path separator. */ int len = strlen(drrb->drr_toname); cp = malloc(len + 2); cp[0] = '/'; (void) strcpy(&cp[1], drrb->drr_toname); chopprefix = cp; } else { chopprefix = drrb->drr_toname + (chopprefix - sendfs); } } else if (flags->isprefix) { /* * A filesystem was specified with -d. We want to tack on * everything but the first element of the sent snapshot path * (all but the pool name). */ if (strchr(tosnap, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "argument - snapshot not allowed with -d")); err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf); goto out; } chopprefix = strchr(drrb->drr_toname, '/'); if (chopprefix == NULL) chopprefix = strchr(drrb->drr_toname, '@'); } else if (strchr(tosnap, '@') == NULL) { /* * If a filesystem was specified without -d or -e, we want to * tack on everything after the fs specified by 'zfs send'. */ chopprefix = drrb->drr_toname + strlen(sendfs); } else { /* A snapshot was specified as an exact path (no -d or -e). */ if (recursive) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot specify snapshot name for multi-snapshot " "stream")); err = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } chopprefix = drrb->drr_toname + strlen(drrb->drr_toname); } ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname); ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL); ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) || strchr(sendfs, '/') == NULL); ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' || chopprefix[0] == '\0'); /* * Determine name of destination snapshot. */ (void) strlcpy(destsnap, tosnap, sizeof (destsnap)); (void) strlcat(destsnap, chopprefix, sizeof (destsnap)); free(cp); if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) { err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf); goto out; } /* * Determine the name of the origin snapshot. 
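* It is either the origin explicitly passed in via the "origin" property (originsnap) or, for clone streams (DRR_FLAG_CLONE), the local snapshot matching drr_fromguid.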
*/ if (originsnap) { (void) strlcpy(origin, originsnap, sizeof (origin)); if (flags->verbose) (void) printf("using provided clone origin %s\n", origin); } else if (drrb->drr_flags & DRR_FLAG_CLONE) { if (guid_to_name(hdl, destsnap, drrb->drr_fromguid, B_FALSE, origin) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "local origin for clone %s does not exist"), destsnap); err = zfs_error(hdl, EZFS_NOENT, errbuf); goto out; } if (flags->verbose) (void) printf("found clone origin %s\n", origin); } boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) & DMU_BACKUP_FEATURE_RESUMING; boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) & DMU_BACKUP_FEATURE_RAW; boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) & DMU_BACKUP_FEATURE_EMBED_DATA; stream_wantsnewfs = (drrb->drr_fromguid == 0 || (drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming; if (stream_wantsnewfs) { /* * if the parent fs does not exist, look for it based on * the parent snap GUID */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive new filesystem stream")); (void) strcpy(name, destsnap); cp = strrchr(name, '/'); if (cp) *cp = '\0'; if (cp && !zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) { char suffix[ZFS_MAX_DATASET_NAME_LEN]; (void) strcpy(suffix, strrchr(destsnap, '/')); if (guid_to_name(hdl, name, parent_snapguid, B_FALSE, destsnap) == 0) { *strchr(destsnap, '@') = '\0'; (void) strcat(destsnap, suffix); } } } else { /* * if the fs does not exist, look for it based on the * fromsnap GUID */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive incremental stream")); (void) strcpy(name, destsnap); *strchr(name, '@') = '\0'; /* * If the exact receive path was specified and this is the * topmost path in the stream, then if the fs does not exist we * should look no further. */ if ((flags->isprefix || (*(chopprefix = drrb->drr_toname + strlen(sendfs)) != '\0' && *chopprefix != '@')) && !zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) { char snap[ZFS_MAX_DATASET_NAME_LEN]; (void) strcpy(snap, strchr(destsnap, '@')); if (guid_to_name(hdl, name, drrb->drr_fromguid, B_FALSE, destsnap) == 0) { *strchr(destsnap, '@') = '\0'; (void) strcat(destsnap, snap); } } } (void) strcpy(name, destsnap); *strchr(name, '@') = '\0'; if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) { zfs_cmd_t zc = {"\0"}; zfs_handle_t *zhp; boolean_t encrypted; (void) strcpy(zc.zc_name, name); /* * Destination fs exists. It must be one of these cases: * - an incremental send stream * - the stream specifies a new fs (full stream or clone) * and they want us to blow away the existing fs (and * have therefore specified -F and removed any snapshots) * - we are resuming a failed receive. */ if (stream_wantsnewfs) { + boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL; if (!flags->force) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' exists\n" "must specify -F to overwrite it"), name); err = zfs_error(hdl, EZFS_EXISTS, errbuf); goto out; } if (ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination has snapshots (eg. 
%s)\n" "must destroy them to overwrite it"), zc.zc_name); err = zfs_error(hdl, EZFS_EXISTS, errbuf); goto out; } + if (is_volume && strrchr(name, '/') == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination %s is the root dataset\n" + "cannot overwrite with a ZVOL"), + name); + err = zfs_error(hdl, EZFS_EXISTS, errbuf); + goto out; + } + if (is_volume && + ioctl(hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, + &zc) == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination has children (eg. %s)\n" + "cannot overwrite with a ZVOL"), + zc.zc_name); + err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf); + goto out; + } } if ((zhp = zfs_open(hdl, name, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { err = -1; goto out; } if (stream_wantsnewfs && zhp->zfs_dmustats.dds_origin[0]) { zfs_close(zhp); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' is a clone\n" "must destroy it to overwrite it"), name); err = zfs_error(hdl, EZFS_EXISTS, errbuf); goto out; } /* * Raw sends can not be performed as an incremental on top * of existing unencryppted datasets. zfs recv -F cant be * used to blow away an existing encrypted filesystem. This * is because it would require the dsl dir to point to the * new key (or lack of a key) and the old key at the same * time. The -F flag may still be used for deleting * intermediate snapshots that would otherwise prevent the * receive from working. */ encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF; if (!stream_wantsnewfs && !encrypted && raw) { zfs_close(zhp); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot perform raw receive on top of " "existing unencrypted dataset")); err = zfs_error(hdl, EZFS_BADRESTORE, errbuf); goto out; } if (stream_wantsnewfs && flags->force && ((raw && !encrypted) || encrypted)) { zfs_close(zhp); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "zfs receive -F cannot be used to destroy an " "encrypted filesystem or overwrite an " "unencrypted one with an encrypted one")); err = zfs_error(hdl, EZFS_BADRESTORE, errbuf); goto out; } if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM && stream_wantsnewfs) { /* We can't do online recv in this case */ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0); if (clp == NULL) { zfs_close(zhp); err = -1; goto out; } if (changelist_prefix(clp) != 0) { changelist_free(clp); zfs_close(zhp); err = -1; goto out; } } /* * If we are resuming a newfs, set newfs here so that we will * mount it if the recv succeeds this time. We can tell * that it was a newfs on the first recv because the fs * itself will be inconsistent (if the fs existed when we * did the first recv, we would have received it into * .../%recv). */ if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT)) newfs = B_TRUE; /* we want to know if we're zoned when validating -o|-x props */ zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); /* may need this info later, get it now we have zhp around */ if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0, NULL, NULL, 0, B_TRUE) == 0) hastoken = B_TRUE; /* gather existing properties on destination */ origprops = fnvlist_alloc(); fnvlist_merge(origprops, zhp->zfs_props); fnvlist_merge(origprops, zhp->zfs_user_props); zfs_close(zhp); } else { zfs_handle_t *zhp; /* * Destination filesystem does not exist. Therefore we better * be creating a new filesystem (either from a full backup, or * a clone). It would therefore be invalid if the user * specified only the pool name (i.e. if the destination name * contained no slash character). 
*/ cp = strrchr(name, '/'); if (!stream_wantsnewfs || cp == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' does not exist"), name); err = zfs_error(hdl, EZFS_NOENT, errbuf); goto out; } /* * Trim off the final dataset component so we perform the * recvbackup ioctl to the filesystem's parent. */ *cp = '\0'; if (flags->isprefix && !flags->istail && !flags->dryrun && create_parents(hdl, destsnap, strlen(tosnap)) != 0) { err = zfs_error(hdl, EZFS_BADRESTORE, errbuf); goto out; } + /* validate parent */ + zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); + if (zhp == NULL) { + err = zfs_error(hdl, EZFS_BADRESTORE, errbuf); + goto out; + } + if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "parent '%s' is not a filesystem"), name); + err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf); + zfs_close(zhp); + goto out; + } + /* * It is invalid to receive a properties stream that was * unencrypted on the send side as a child of an encrypted * parent. Technically there is nothing preventing this, but * it would mean that the encryption=off property which is * locally set on the send side would not be received correctly. * We can infer encryption=off if the stream is not raw and * properties were included since the send side will only ever * send the encryption property in a raw nvlist header. This * check will be avoided if the user specifically overrides * the encryption property on the command line. */ if (!raw && rcvprops != NULL && !nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) { uint64_t crypt; - zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); - if (zhp == NULL) { - err = zfs_error(hdl, EZFS_BADRESTORE, errbuf); - goto out; - } - crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION); - zfs_close(zhp); if (crypt != ZIO_CRYPT_OFF) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent '%s' must not be encrypted to " "receive unencrypted property"), name); err = zfs_error(hdl, EZFS_BADPROP, errbuf); + zfs_close(zhp); goto out; } } + zfs_close(zhp); newfs = B_TRUE; *cp = '/'; } if (flags->verbose) { (void) printf("%s %s stream of %s into %s\n", flags->dryrun ? "would receive" : "receiving", drrb->drr_fromguid ?
"incremental" : "full", drrb->drr_toname, destsnap); (void) fflush(stdout); } if (flags->dryrun) { err = recv_skip(hdl, infd, flags->byteswap); goto out; } if (top_zfs && (*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) toplevel = B_TRUE; if (drrb->drr_type == DMU_OST_ZVOL) { type = ZFS_TYPE_VOLUME; } else if (drrb->drr_type == DMU_OST_ZFS) { type = ZFS_TYPE_FILESYSTEM; } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid record type: 0x%d"), drrb->drr_type); err = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive, stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops, &oxprops, &wkeydata, &wkeylen, errbuf)) != 0) goto out; err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops, oxprops, wkeydata, wkeylen, origin, flags->force, flags->resumable, raw, infd, drr_noswap, cleanup_fd, &read_bytes, &errflags, action_handlep, &prop_errors); ioctl_errno = ioctl_err; prop_errflags = errflags; if (err == 0) { nvpair_t *prop_err = NULL; while ((prop_err = nvlist_next_nvpair(prop_errors, prop_err)) != NULL) { char tbuf[1024]; zfs_prop_t prop; int intval; prop = zfs_name_to_prop(nvpair_name(prop_err)); (void) nvpair_value_int32(prop_err, &intval); if (strcmp(nvpair_name(prop_err), ZPROP_N_MORE_ERRORS) == 0) { trunc_prop_errs(intval); break; } else if (snapname == NULL || finalsnap == NULL || strcmp(finalsnap, snapname) == 0 || strcmp(nvpair_name(prop_err), zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) { /* * Skip the special case of, for example, * "refquota", errors on intermediate * snapshots leading up to a final one. * That's why we have all of the checks above. * * See zfs_ioctl.c's extract_delay_props() for * a list of props which can fail on * intermediate snapshots, but shouldn't * affect the overall receive. */ (void) snprintf(tbuf, sizeof (tbuf), dgettext(TEXT_DOMAIN, "cannot receive %s property on %s"), nvpair_name(prop_err), name); zfs_setprop_error(hdl, prop, intval, tbuf); } } } if (err == 0 && snapprops_nvlist) { zfs_cmd_t zc = {"\0"}; (void) strcpy(zc.zc_name, destsnap); zc.zc_cookie = B_TRUE; /* received */ if (zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist) == 0) { (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); zcmd_free_nvlists(&zc); } } if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) { /* * It may be that this snapshot already exists, * in which case we want to consume & ignore it * rather than failing. */ avl_tree_t *local_avl; nvlist_t *local_nv, *fs; cp = strchr(destsnap, '@'); /* * XXX Do this faster by just iterating over snaps in * this fs. Also if zc_value does not exist, we will * get a strange "does not exist" error message. 
*/ *cp = '\0'; if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE, B_FALSE, B_FALSE, &local_nv, &local_avl) == 0) { *cp = '@'; fs = fsavl_find(local_avl, drrb->drr_toguid, NULL); fsavl_destroy(local_avl); nvlist_free(local_nv); if (fs != NULL) { if (flags->verbose) { (void) printf("snap %s already exists; " "ignoring\n", destsnap); } err = ioctl_err = recv_skip(hdl, infd, flags->byteswap); } } *cp = '@'; } if (ioctl_err != 0) { switch (ioctl_errno) { case ENODEV: cp = strchr(destsnap, '@'); *cp = '\0'; zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "most recent snapshot of %s does not\n" "match incremental source"), destsnap); (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); *cp = '@'; break; case ETXTBSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination %s has been modified\n" "since most recent snapshot"), name); (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); break; case EACCES: if (raw && stream_wantsnewfs) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to create encryption key")); } else if (raw && !stream_wantsnewfs) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "encryption key does not match " "existing key")); } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "inherited key must be loaded")); } (void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf); break; case EEXIST: cp = strchr(destsnap, '@'); if (newfs) { /* it's the containing fs that exists */ *cp = '\0'; } zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination already exists")); (void) zfs_error_fmt(hdl, EZFS_EXISTS, dgettext(TEXT_DOMAIN, "cannot restore to %s"), destsnap); *cp = '@'; break; case EINVAL: if (flags->resumable) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "kernel modules must be upgraded to " "receive this stream.")); if (embedded && !raw) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incompatible embedded data stream " "feature with encrypted receive.")); (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); break; case ECKSUM: recv_ecksum_set_aux(hdl, destsnap, flags->resumable); (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); break; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to receive this stream.")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EDQUOT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination %s space quota exceeded."), name); (void) zfs_error(hdl, EZFS_NOSPC, errbuf); break; case EBUSY: if (hastoken) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination %s contains " "partially-complete state from " "\"zfs receive -s\"."), name); (void) zfs_error(hdl, EZFS_BUSY, errbuf); break; } /* fallthru */ default: (void) zfs_standard_error(hdl, ioctl_errno, errbuf); } } /* * Mount the target filesystem (if created). Also mount any * children of the target filesystem if we did a replication * receive (indicated by stream_avl being non-NULL). */ cp = strchr(destsnap, '@'); if (cp && (ioctl_err == 0 || !newfs)) { zfs_handle_t *h; *cp = '\0'; h = zfs_open(hdl, destsnap, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (h != NULL) { if (h->zfs_type == ZFS_TYPE_VOLUME) { *cp = '@'; } else if (newfs || stream_avl) { /* * Track the first/top of hierarchy fs, * for mounting and sharing later. 
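* zfs_receive() uses *top_zfs once all streams are processed to gather a mountpoint changelist and remount/share the received datasets.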
*/ if (top_zfs && *top_zfs == NULL) *top_zfs = zfs_strdup(hdl, destsnap); } zfs_close(h); } *cp = '@'; } if (clp) { if (!flags->nomount) err |= changelist_postfix(clp); changelist_free(clp); } if (prop_errflags & ZPROP_ERR_NOCLEAR) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: " "failed to clear unreceived properties on %s"), name); (void) fprintf(stderr, "\n"); } if (prop_errflags & ZPROP_ERR_NORESTORE) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: " "failed to restore original properties on %s"), name); (void) fprintf(stderr, "\n"); } if (err || ioctl_err) { err = -1; goto out; } if (flags->verbose) { char buf1[64]; char buf2[64]; uint64_t bytes = read_bytes; time_t delta = time(NULL) - begin_time; if (delta == 0) delta = 1; zfs_nicebytes(bytes, buf1, sizeof (buf1)); zfs_nicebytes(bytes/delta, buf2, sizeof (buf1)); (void) printf("received %s stream in %lu seconds (%s/sec)\n", buf1, delta, buf2); } err = 0; out: if (prop_errors != NULL) nvlist_free(prop_errors); if (tmp_keylocation[0] != '\0') { VERIFY(0 == nvlist_add_string(rcvprops, zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation)); } if (newprops) nvlist_free(rcvprops); nvlist_free(oxprops); nvlist_free(origprops); return (err); } /* * Check properties we were asked to override (both -o|-x) */ static boolean_t zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props, const char *errbuf) { nvpair_t *nvp; zfs_prop_t prop; const char *name; nvp = NULL; while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) { name = nvpair_name(nvp); prop = zfs_name_to_prop(name); if (prop == ZPROP_INVAL) { if (!zfs_prop_user(name)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property '%s'"), name); return (B_FALSE); } continue; } /* * "origin" is readonly but is used to receive datasets as * clones so we don't raise an error here */ if (prop == ZFS_PROP_ORIGIN) continue; /* encryption params have their own verification later */ if (prop == ZFS_PROP_ENCRYPTION || zfs_prop_encryption_key_param(prop)) continue; /* * cannot override readonly, set-once and other specific * settable properties */ if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION || prop == ZFS_PROP_VOLSIZE) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property '%s'"), name); return (B_FALSE); } } return (B_TRUE); } static int zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, const char *originsnap, recvflags_t *flags, int infd, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd, uint64_t *action_handlep, const char *finalsnap, nvlist_t *cmdprops) { int err; dmu_replay_record_t drr, drr_noswap; struct drr_begin *drrb = &drr.drr_u.drr_begin; char errbuf[1024]; zio_cksum_t zcksum = { { 0 } }; uint64_t featureflags; int hdrtype; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); /* check cmdline props, raise an error if they cannot be received */ if (!zfs_receive_checkprops(hdl, cmdprops, errbuf)) { return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } if (flags->isprefix && !zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs " "(%s) does not exist"), tosnap); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } if (originsnap && !zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs " "(%s) does not exist"), originsnap); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } /* read in the BEGIN record */ if (0 != (err = recv_read(hdl, infd, &drr, sizeof 
(drr), B_FALSE, &zcksum))) return (err); if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) { /* It's the double end record at the end of a package */ return (ENODATA); } /* the kernel needs the non-byteswapped begin record */ drr_noswap = drr; flags->byteswap = B_FALSE; if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { /* * We computed the checksum in the wrong byteorder in * recv_read() above; do it again correctly. */ bzero(&zcksum, sizeof (zio_cksum_t)); fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum); flags->byteswap = B_TRUE; drr.drr_type = BSWAP_32(drr.drr_type); drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen); drrb->drr_magic = BSWAP_64(drrb->drr_magic); drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo); drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); drrb->drr_type = BSWAP_32(drrb->drr_type); drrb->drr_flags = BSWAP_32(drrb->drr_flags); drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); } if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (bad magic number)")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo); if (!DMU_STREAM_SUPPORTED(featureflags) || (hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "stream has unsupported feature, feature flags = %lx"), featureflags); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } if (strchr(drrb->drr_toname, '@') == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (bad snapshot name)")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) { char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN]; if (sendfs == NULL) { /* * We were not called from zfs_receive_package(). Get * the fs specified by 'zfs send'. */ char *cp; (void) strlcpy(nonpackage_sendfs, drr.drr_u.drr_begin.drr_toname, sizeof (nonpackage_sendfs)); if ((cp = strchr(nonpackage_sendfs, '@')) != NULL) *cp = '\0'; sendfs = nonpackage_sendfs; VERIFY(finalsnap == NULL); } return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags, &drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd, action_handlep, finalsnap, cmdprops)); } else { assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_COMPOUNDSTREAM); return (zfs_receive_package(hdl, infd, tosnap, flags, &drr, &zcksum, top_zfs, cleanup_fd, action_handlep, cmdprops)); } } /* * Restores a backup of tosnap from the file descriptor specified by infd. * Return 0 on total success, -2 if some things couldn't be * destroyed/renamed/promoted, -1 if some things couldn't be received. * (-1 will override -2, if -1 and the resumable flag was specified the * transfer can be resumed if the sending side supports it). */ int zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props, recvflags_t *flags, int infd, avl_tree_t *stream_avl) { char *top_zfs = NULL; int err; int cleanup_fd; uint64_t action_handle = 0; struct stat sb; char *originsnap = NULL; /* * The only way fstat can fail is if we do not have a valid file * descriptor. 
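* In that case nothing can be read from the stream, so fail immediately.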
*/ if (fstat(infd, &sb) == -1) { perror("fstat"); return (-2); } #ifdef __linux__ #ifndef F_SETPIPE_SZ #define F_SETPIPE_SZ (F_SETLEASE + 7) #endif /* F_SETPIPE_SZ */ #ifndef F_GETPIPE_SZ #define F_GETPIPE_SZ (F_GETLEASE + 7) #endif /* F_GETPIPE_SZ */ /* * It is not uncommon for gigabytes to be processed in zfs receive. * Speculatively increase the buffer size via Linux-specific fcntl() * call. */ if (S_ISFIFO(sb.st_mode)) { FILE *procf = fopen("/proc/sys/fs/pipe-max-size", "r"); if (procf != NULL) { unsigned long max_psize; long cur_psize; if (fscanf(procf, "%lu", &max_psize) > 0) { cur_psize = fcntl(infd, F_GETPIPE_SZ); if (cur_psize > 0 && max_psize > (unsigned long) cur_psize) (void) fcntl(infd, F_SETPIPE_SZ, max_psize); } fclose(procf); } } #endif /* __linux__ */ if (props) { err = nvlist_lookup_string(props, "origin", &originsnap); if (err && err != ENOENT) return (err); } cleanup_fd = open(ZFS_DEV, O_RDWR); VERIFY(cleanup_fd >= 0); err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL, stream_avl, &top_zfs, cleanup_fd, &action_handle, NULL, props); VERIFY(0 == close(cleanup_fd)); if (err == 0 && !flags->nomount && top_zfs) { zfs_handle_t *zhp = NULL; prop_changelist_t *clp = NULL; zhp = zfs_open(hdl, top_zfs, ZFS_TYPE_FILESYSTEM); if (zhp != NULL) { clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, CL_GATHER_MOUNT_ALWAYS, 0); zfs_close(zhp); if (clp != NULL) { /* mount and share received datasets */ err = changelist_postfix(clp); changelist_free(clp); } } if (zhp == NULL || clp == NULL || err) err = -1; } if (top_zfs) free(top_zfs); return (err); } diff --git a/lib/libzfs/libzfs_util.c b/lib/libzfs/libzfs_util.c index d7401cdf408a..4ed885880922 100644 --- a/lib/libzfs/libzfs_util.c +++ b/lib/libzfs/libzfs_util.c @@ -1,1930 +1,1935 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, Joyent, Inc. All rights reserved. * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright 2016 Igor Kozhukhov * Copyright (c) 2017 Datto Inc. */ /* * Internal utility routines for the ZFS library. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libzfs_impl.h" #include "zfs_prop.h" #include "zfeature_common.h" #include #include int libzfs_errno(libzfs_handle_t *hdl) { return (hdl->libzfs_error); } const char * libzfs_error_init(int error) { switch (error) { case ENXIO: return (dgettext(TEXT_DOMAIN, "The ZFS modules are not " "loaded.\nTry running '/sbin/modprobe zfs' as root " "to load them.\n")); case ENOENT: return (dgettext(TEXT_DOMAIN, "/dev/zfs and /proc/self/mounts " "are required.\nTry running 'udevadm trigger' and 'mount " "-t proc proc /proc' as root.\n")); case ENOEXEC: return (dgettext(TEXT_DOMAIN, "The ZFS modules cannot be " "auto-loaded.\nTry running '/sbin/modprobe zfs' as " "root to manually load them.\n")); case EACCES: return (dgettext(TEXT_DOMAIN, "Permission denied the " "ZFS utilities must be run as root.\n")); default: return (dgettext(TEXT_DOMAIN, "Failed to initialize the " "libzfs library.\n")); } } const char * libzfs_error_action(libzfs_handle_t *hdl) { return (hdl->libzfs_action); } const char * libzfs_error_description(libzfs_handle_t *hdl) { if (hdl->libzfs_desc[0] != '\0') return (hdl->libzfs_desc); switch (hdl->libzfs_error) { case EZFS_NOMEM: return (dgettext(TEXT_DOMAIN, "out of memory")); case EZFS_BADPROP: return (dgettext(TEXT_DOMAIN, "invalid property value")); case EZFS_PROPREADONLY: return (dgettext(TEXT_DOMAIN, "read-only property")); case EZFS_PROPTYPE: return (dgettext(TEXT_DOMAIN, "property doesn't apply to " "datasets of this type")); case EZFS_PROPNONINHERIT: return (dgettext(TEXT_DOMAIN, "property cannot be inherited")); case EZFS_PROPSPACE: return (dgettext(TEXT_DOMAIN, "invalid quota or reservation")); case EZFS_BADTYPE: return (dgettext(TEXT_DOMAIN, "operation not applicable to " "datasets of this type")); case EZFS_BUSY: return (dgettext(TEXT_DOMAIN, "pool or dataset is busy")); case EZFS_EXISTS: return (dgettext(TEXT_DOMAIN, "pool or dataset exists")); case EZFS_NOENT: return (dgettext(TEXT_DOMAIN, "no such pool or dataset")); case EZFS_BADSTREAM: return (dgettext(TEXT_DOMAIN, "invalid backup stream")); case EZFS_DSREADONLY: return (dgettext(TEXT_DOMAIN, "dataset is read-only")); case EZFS_VOLTOOBIG: return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for " "this system")); case EZFS_INVALIDNAME: return (dgettext(TEXT_DOMAIN, "invalid name")); case EZFS_BADRESTORE: return (dgettext(TEXT_DOMAIN, "unable to restore to " "destination")); case EZFS_BADBACKUP: return (dgettext(TEXT_DOMAIN, "backup failed")); case EZFS_BADTARGET: return (dgettext(TEXT_DOMAIN, "invalid target vdev")); case EZFS_NODEVICE: return (dgettext(TEXT_DOMAIN, "no such device in pool")); case EZFS_BADDEV: return (dgettext(TEXT_DOMAIN, "invalid device")); case EZFS_NOREPLICAS: return (dgettext(TEXT_DOMAIN, "no valid replicas")); case EZFS_RESILVERING: return (dgettext(TEXT_DOMAIN, "currently resilvering")); case EZFS_BADVERSION: return (dgettext(TEXT_DOMAIN, "unsupported version or " "feature")); case EZFS_POOLUNAVAIL: return (dgettext(TEXT_DOMAIN, "pool is unavailable")); case EZFS_DEVOVERFLOW: return (dgettext(TEXT_DOMAIN, "too many devices in one vdev")); case EZFS_BADPATH: return (dgettext(TEXT_DOMAIN, "must be an absolute path")); case EZFS_CROSSTARGET: return (dgettext(TEXT_DOMAIN, "operation crosses datasets or " "pools")); case EZFS_ZONED: return (dgettext(TEXT_DOMAIN, "dataset in use by local zone")); case EZFS_MOUNTFAILED: return 
(dgettext(TEXT_DOMAIN, "mount failed")); case EZFS_UMOUNTFAILED: return (dgettext(TEXT_DOMAIN, "umount failed")); case EZFS_UNSHARENFSFAILED: return (dgettext(TEXT_DOMAIN, "unshare(1M) failed")); case EZFS_SHARENFSFAILED: return (dgettext(TEXT_DOMAIN, "share(1M) failed")); case EZFS_UNSHARESMBFAILED: return (dgettext(TEXT_DOMAIN, "smb remove share failed")); case EZFS_SHARESMBFAILED: return (dgettext(TEXT_DOMAIN, "smb add share failed")); case EZFS_PERM: return (dgettext(TEXT_DOMAIN, "permission denied")); case EZFS_NOSPC: return (dgettext(TEXT_DOMAIN, "out of space")); case EZFS_FAULT: return (dgettext(TEXT_DOMAIN, "bad address")); case EZFS_IO: return (dgettext(TEXT_DOMAIN, "I/O error")); case EZFS_INTR: return (dgettext(TEXT_DOMAIN, "signal received")); case EZFS_ISSPARE: return (dgettext(TEXT_DOMAIN, "device is reserved as a hot " "spare")); case EZFS_INVALCONFIG: return (dgettext(TEXT_DOMAIN, "invalid vdev configuration")); case EZFS_RECURSIVE: return (dgettext(TEXT_DOMAIN, "recursive dataset dependency")); case EZFS_NOHISTORY: return (dgettext(TEXT_DOMAIN, "no history available")); case EZFS_POOLPROPS: return (dgettext(TEXT_DOMAIN, "failed to retrieve " "pool properties")); case EZFS_POOL_NOTSUP: return (dgettext(TEXT_DOMAIN, "operation not supported " "on this type of pool")); case EZFS_POOL_INVALARG: return (dgettext(TEXT_DOMAIN, "invalid argument for " "this pool operation")); case EZFS_NAMETOOLONG: return (dgettext(TEXT_DOMAIN, "dataset name is too long")); case EZFS_OPENFAILED: return (dgettext(TEXT_DOMAIN, "open failed")); case EZFS_NOCAP: return (dgettext(TEXT_DOMAIN, "disk capacity information could not be retrieved")); case EZFS_LABELFAILED: return (dgettext(TEXT_DOMAIN, "write of label failed")); case EZFS_BADWHO: return (dgettext(TEXT_DOMAIN, "invalid user/group")); case EZFS_BADPERM: return (dgettext(TEXT_DOMAIN, "invalid permission")); case EZFS_BADPERMSET: return (dgettext(TEXT_DOMAIN, "invalid permission set name")); case EZFS_NODELEGATION: return (dgettext(TEXT_DOMAIN, "delegated administration is " "disabled on pool")); case EZFS_BADCACHE: return (dgettext(TEXT_DOMAIN, "invalid or missing cache file")); case EZFS_ISL2CACHE: return (dgettext(TEXT_DOMAIN, "device is in use as a cache")); case EZFS_VDEVNOTSUP: return (dgettext(TEXT_DOMAIN, "vdev specification is not " "supported")); case EZFS_NOTSUP: return (dgettext(TEXT_DOMAIN, "operation not supported " "on this dataset")); case EZFS_IOC_NOTSUPPORTED: return (dgettext(TEXT_DOMAIN, "operation not supported by " "zfs kernel module")); case EZFS_ACTIVE_SPARE: return (dgettext(TEXT_DOMAIN, "pool has active shared spare " "device")); case EZFS_UNPLAYED_LOGS: return (dgettext(TEXT_DOMAIN, "log device has unplayed intent " "logs")); case EZFS_REFTAG_RELE: return (dgettext(TEXT_DOMAIN, "no such tag on this dataset")); case EZFS_REFTAG_HOLD: return (dgettext(TEXT_DOMAIN, "tag already exists on this " "dataset")); case EZFS_TAGTOOLONG: return (dgettext(TEXT_DOMAIN, "tag too long")); case EZFS_PIPEFAILED: return (dgettext(TEXT_DOMAIN, "pipe create failed")); case EZFS_THREADCREATEFAILED: return (dgettext(TEXT_DOMAIN, "thread create failed")); case EZFS_POSTSPLIT_ONLINE: return (dgettext(TEXT_DOMAIN, "disk was split from this pool " "into a new one")); case EZFS_SCRUB_PAUSED: return (dgettext(TEXT_DOMAIN, "scrub is paused; " "use 'zpool scrub' to resume")); case EZFS_SCRUBBING: return (dgettext(TEXT_DOMAIN, "currently scrubbing; " "use 'zpool scrub -s' to cancel current scrub")); case EZFS_NO_SCRUB: return (dgettext(TEXT_DOMAIN, 
"there is no active scrub")); case EZFS_DIFF: return (dgettext(TEXT_DOMAIN, "unable to generate diffs")); case EZFS_DIFFDATA: return (dgettext(TEXT_DOMAIN, "invalid diff data")); case EZFS_POOLREADONLY: return (dgettext(TEXT_DOMAIN, "pool is read-only")); case EZFS_NO_PENDING: return (dgettext(TEXT_DOMAIN, "operation is not " "in progress")); case EZFS_CHECKPOINT_EXISTS: return (dgettext(TEXT_DOMAIN, "checkpoint exists")); case EZFS_DISCARDING_CHECKPOINT: return (dgettext(TEXT_DOMAIN, "currently discarding " "checkpoint")); case EZFS_NO_CHECKPOINT: return (dgettext(TEXT_DOMAIN, "checkpoint does not exist")); case EZFS_DEVRM_IN_PROGRESS: return (dgettext(TEXT_DOMAIN, "device removal in progress")); case EZFS_VDEV_TOO_BIG: return (dgettext(TEXT_DOMAIN, "device exceeds supported size")); case EZFS_ACTIVE_POOL: return (dgettext(TEXT_DOMAIN, "pool is imported on a " "different host")); case EZFS_CRYPTOFAILED: return (dgettext(TEXT_DOMAIN, "encryption failure")); case EZFS_TOOMANY: return (dgettext(TEXT_DOMAIN, "argument list too long")); case EZFS_INITIALIZING: return (dgettext(TEXT_DOMAIN, "currently initializing")); case EZFS_NO_INITIALIZE: return (dgettext(TEXT_DOMAIN, "there is no active " "initialization")); + case EZFS_WRONG_PARENT: + return (dgettext(TEXT_DOMAIN, "invalid parent dataset")); case EZFS_UNKNOWN: return (dgettext(TEXT_DOMAIN, "unknown error")); default: assert(hdl->libzfs_error == 0); return (dgettext(TEXT_DOMAIN, "no error")); } } /*PRINTFLIKE2*/ void zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...) { va_list ap; va_start(ap, fmt); (void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc), fmt, ap); hdl->libzfs_desc_active = 1; va_end(ap); } static void zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap) { (void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action), fmt, ap); hdl->libzfs_error = error; if (hdl->libzfs_desc_active) hdl->libzfs_desc_active = 0; else hdl->libzfs_desc[0] = '\0'; if (hdl->libzfs_printerr) { if (error == EZFS_UNKNOWN) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal " "error: %s\n"), libzfs_error_description(hdl)); abort(); } (void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action, libzfs_error_description(hdl)); if (error == EZFS_NOMEM) exit(1); } } int zfs_error(libzfs_handle_t *hdl, int error, const char *msg) { return (zfs_error_fmt(hdl, error, "%s", msg)); } /*PRINTFLIKE3*/ int zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) { va_list ap; va_start(ap, fmt); zfs_verror(hdl, error, fmt, ap); va_end(ap); return (-1); } static int zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap) { switch (error) { case EPERM: case EACCES: zfs_verror(hdl, EZFS_PERM, fmt, ap); return (-1); case ECANCELED: zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap); return (-1); case EIO: zfs_verror(hdl, EZFS_IO, fmt, ap); return (-1); case EFAULT: zfs_verror(hdl, EZFS_FAULT, fmt, ap); return (-1); case EINTR: zfs_verror(hdl, EZFS_INTR, fmt, ap); return (-1); } return (0); } int zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg) { return (zfs_standard_error_fmt(hdl, error, "%s", msg)); } /*PRINTFLIKE3*/ int zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); if (zfs_common_error(hdl, error, fmt, ap) != 0) { va_end(ap); return (-1); } switch (error) { case ENXIO: case ENODEV: case EPIPE: zfs_verror(hdl, EZFS_IO, fmt, ap); break; case ENOENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset does not exist")); zfs_verror(hdl, EZFS_NOENT, fmt, ap); break; case ENOSPC: case EDQUOT: zfs_verror(hdl, EZFS_NOSPC, fmt, ap); break; case EEXIST: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset already exists")); zfs_verror(hdl, EZFS_EXISTS, fmt, ap); break; case EBUSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is busy")); zfs_verror(hdl, EZFS_BUSY, fmt, ap); break; case EROFS: zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap); break; case ENAMETOOLONG: zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap); break; case ENOTSUP: zfs_verror(hdl, EZFS_BADVERSION, fmt, ap); break; case EAGAIN: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool I/O is currently suspended")); zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap); break; case EREMOTEIO: zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap); break; case ZFS_ERR_IOC_CMD_UNAVAIL: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs " "module does not support this operation. A reboot may " "be required to enable this operation.")); zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; case ZFS_ERR_IOC_ARG_UNAVAIL: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs " "module does not support an option for this operation. " "A reboot may be required to enable this option.")); zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; case ZFS_ERR_IOC_ARG_REQUIRED: case ZFS_ERR_IOC_ARG_BADTYPE: zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; + case ZFS_ERR_WRONG_PARENT: + zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap); + break; default: zfs_error_aux(hdl, strerror(error)); zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap); break; } va_end(ap); return (-1); } int zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg) { return (zpool_standard_error_fmt(hdl, error, "%s", msg)); } /*PRINTFLIKE3*/ int zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); if (zfs_common_error(hdl, error, fmt, ap) != 0) { va_end(ap); return (-1); } switch (error) { case ENODEV: zfs_verror(hdl, EZFS_NODEVICE, fmt, ap); break; case ENOENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool or dataset")); zfs_verror(hdl, EZFS_NOENT, fmt, ap); break; case EEXIST: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool already exists")); zfs_verror(hdl, EZFS_EXISTS, fmt, ap); break; case EBUSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy")); zfs_verror(hdl, EZFS_BUSY, fmt, ap); break; /* There is no pending operation to cancel */ case ENOTACTIVE: zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap); break; case ENXIO: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more devices is currently unavailable")); zfs_verror(hdl, EZFS_BADDEV, fmt, ap); break; case ENAMETOOLONG: zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap); break; case ENOTSUP: zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap); break; case EINVAL: zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap); break; case ENOSPC: case EDQUOT: zfs_verror(hdl, EZFS_NOSPC, fmt, ap); return (-1); case EAGAIN: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool I/O is currently suspended")); zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap); break; case EROFS: zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap); break; case EDOM: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "block size out of range or does not match")); zfs_verror(hdl, EZFS_BADPROP, fmt, ap); break; case EREMOTEIO: zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap); break; case ZFS_ERR_CHECKPOINT_EXISTS: zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap); break; case ZFS_ERR_DISCARDING_CHECKPOINT: zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap); break; case ZFS_ERR_NO_CHECKPOINT: zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap); break; case ZFS_ERR_DEVRM_IN_PROGRESS: zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap); break; case ZFS_ERR_VDEV_TOO_BIG: zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap); break; case ZFS_ERR_IOC_CMD_UNAVAIL: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs " "module does not support this operation. A reboot may " "be required to enable this operation.")); zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; case ZFS_ERR_IOC_ARG_UNAVAIL: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs " "module does not support an option for this operation. " "A reboot may be required to enable this option.")); zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; case ZFS_ERR_IOC_ARG_REQUIRED: case ZFS_ERR_IOC_ARG_BADTYPE: zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap); break; default: zfs_error_aux(hdl, strerror(error)); zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap); } va_end(ap); return (-1); } /* * Display an out of memory error message and abort the current program. */ int no_memory(libzfs_handle_t *hdl) { return (zfs_error(hdl, EZFS_NOMEM, "internal error")); } /* * A safe form of malloc() which will die if the allocation fails. */ void * zfs_alloc(libzfs_handle_t *hdl, size_t size) { void *data; if ((data = calloc(1, size)) == NULL) (void) no_memory(hdl); return (data); } /* * A safe form of asprintf() which will die if the allocation fails. */ /*PRINTFLIKE2*/ char * zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...) { va_list ap; char *ret; int err; va_start(ap, fmt); err = vasprintf(&ret, fmt, ap); va_end(ap); if (err < 0) (void) no_memory(hdl); return (ret); } /* * A safe form of realloc(), which also zeroes newly allocated space. 
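* On allocation failure the original buffer is left intact, as with realloc().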
*/ void * zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize) { void *ret; if ((ret = realloc(ptr, newsize)) == NULL) { (void) no_memory(hdl); return (NULL); } bzero((char *)ret + oldsize, (newsize - oldsize)); return (ret); } /* * A safe form of strdup() which will die if the allocation fails. */ char * zfs_strdup(libzfs_handle_t *hdl, const char *str) { char *ret; if ((ret = strdup(str)) == NULL) (void) no_memory(hdl); return (ret); } void libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr) { hdl->libzfs_printerr = printerr; } static int libzfs_module_loaded(const char *module) { const char path_prefix[] = "/sys/module/"; char path[256]; memcpy(path, path_prefix, sizeof (path_prefix) - 1); strcpy(path + sizeof (path_prefix) - 1, module); return (access(path, F_OK) == 0); } /* * Read lines from an open file descriptor and store them in an array of * strings until EOF. lines[] will be allocated and populated with all the * lines read. All newlines are replaced with NULL terminators for * convenience. lines[] must be freed after use with libzfs_free_str_array(). * * Returns the number of lines read. */ static int libzfs_read_stdout_from_fd(int fd, char **lines[]) { FILE *fp; int lines_cnt = 0; size_t len = 0; char *line = NULL; char **tmp_lines = NULL, **tmp; char *nl = NULL; int rc; fp = fdopen(fd, "r"); if (fp == NULL) return (0); while (1) { rc = getline(&line, &len, fp); if (rc == -1) break; tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1)); if (tmp == NULL) { /* Return the lines we were able to process */ break; } tmp_lines = tmp; /* Terminate newlines */ if ((nl = strchr(line, '\n')) != NULL) *nl = '\0'; tmp_lines[lines_cnt] = line; lines_cnt++; line = NULL; } fclose(fp); *lines = tmp_lines; return (lines_cnt); } static int libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags, char **lines[], int *lines_cnt) { pid_t pid; int error, devnull_fd; int link[2]; /* * Setup a pipe between our child and parent process if we're * reading stdout. */ if ((lines != NULL) && pipe(link) == -1) return (-ESTRPIPE); pid = vfork(); if (pid == 0) { /* Child process */ devnull_fd = open("/dev/null", O_WRONLY); if (devnull_fd < 0) _exit(-1); if (!(flags & STDOUT_VERBOSE) && (lines == NULL)) (void) dup2(devnull_fd, STDOUT_FILENO); else if (lines != NULL) { /* Save the output to lines[] */ dup2(link[1], STDOUT_FILENO); close(link[0]); close(link[1]); } if (!(flags & STDERR_VERBOSE)) (void) dup2(devnull_fd, STDERR_FILENO); close(devnull_fd); if (flags & NO_DEFAULT_PATH) { if (env == NULL) execv(path, argv); else execve(path, argv, env); } else { if (env == NULL) execvp(path, argv); else execvpe(path, argv, env); } _exit(-1); } else if (pid > 0) { /* Parent process */ int status; while ((error = waitpid(pid, &status, 0)) == -1 && errno == EINTR) { } if (error < 0 || !WIFEXITED(status)) return (-1); if (lines != NULL) { close(link[1]); *lines_cnt = libzfs_read_stdout_from_fd(link[0], lines); } return (WEXITSTATUS(status)); } return (-1); } int libzfs_run_process(const char *path, char *argv[], int flags) { return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL)); } /* * Run a command and store its stdout lines in an array of strings (lines[]). * lines[] is allocated and populated for you, and the number of lines is set in * lines_cnt. lines[] must be freed after use with libzfs_free_str_array(). * All newlines (\n) in lines[] are terminated for convenience. 
*/ int libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[], char **lines[], int *lines_cnt) { return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt)); } /* * Same as libzfs_run_process_get_stdout(), but run without $PATH set. This * means that *path needs to be the full path to the executable. */ int libzfs_run_process_get_stdout_nopath(const char *path, char *argv[], char *env[], char **lines[], int *lines_cnt) { return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH, lines, lines_cnt)); } /* * Free an array of strings. Free both the strings contained in the array and * the array itself. */ void libzfs_free_str_array(char **strs, int count) { while (--count >= 0) free(strs[count]); free(strs); } /* * Returns 1 if environment variable is set to "YES", "yes", "ON", "on", or * a non-zero number. * * Returns 0 otherwise. */ int libzfs_envvar_is_set(char *envvar) { char *env = getenv(envvar); if (env && (strtoul(env, NULL, 0) > 0 || (!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) || (!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2))) return (1); return (0); } /* * Verify the required ZFS_DEV device is available and optionally attempt * to load the ZFS modules. Under normal circumstances the modules * should already have been loaded by some external mechanism. * * Environment variables: * - ZFS_MODULE_LOADING="YES|yes|ON|on" - Attempt to load modules. * - ZFS_MODULE_TIMEOUT="" - Seconds to wait for ZFS_DEV */ static int libzfs_load_module(const char *module) { char *argv[4] = {"/sbin/modprobe", "-q", (char *)module, (char *)0}; char *load_str, *timeout_str; long timeout = 10; /* seconds */ long busy_timeout = 10; /* milliseconds */ int load = 0, fd; hrtime_t start; /* Optionally request module loading */ if (!libzfs_module_loaded(module)) { load_str = getenv("ZFS_MODULE_LOADING"); if (load_str) { if (!strncasecmp(load_str, "YES", strlen("YES")) || !strncasecmp(load_str, "ON", strlen("ON"))) load = 1; else load = 0; } if (load) { if (libzfs_run_process("/sbin/modprobe", argv, 0)) return (ENOEXEC); } if (!libzfs_module_loaded(module)) return (ENXIO); } /* * Device creation by udev is asynchronous and waiting may be * required. Busy wait for 10ms and then fall back to polling every * 10ms for the allowed timeout (default 10s, max 10m). This is * done to optimize for the common case where the device is * immediately available and to avoid penalizing the possible * case where udev is slow or unable to create the device. 
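* For example, ZFS_MODULE_TIMEOUT=60 allows up to 60 seconds; the value is clamped to the range 0..600 below.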
*/ timeout_str = getenv("ZFS_MODULE_TIMEOUT"); if (timeout_str) { timeout = strtol(timeout_str, NULL, 0); timeout = MAX(MIN(timeout, (10 * 60)), 0); /* 0 <= N <= 600 */ } start = gethrtime(); do { fd = open(ZFS_DEV, O_RDWR); if (fd >= 0) { (void) close(fd); return (0); } else if (errno != ENOENT) { return (errno); } else if (NSEC2MSEC(gethrtime() - start) < busy_timeout) { sched_yield(); } else { usleep(10 * MILLISEC); } } while (NSEC2MSEC(gethrtime() - start) < (timeout * MILLISEC)); return (ENOENT); } libzfs_handle_t * libzfs_init(void) { libzfs_handle_t *hdl; int error; error = libzfs_load_module(ZFS_DRIVER); if (error) { errno = error; return (NULL); } if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) { return (NULL); } if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR)) < 0) { free(hdl); return (NULL); } #ifdef HAVE_SETMNTENT if ((hdl->libzfs_mnttab = setmntent(MNTTAB, "r")) == NULL) { #else if ((hdl->libzfs_mnttab = fopen(MNTTAB, "r")) == NULL) { #endif (void) close(hdl->libzfs_fd); free(hdl); return (NULL); } hdl->libzfs_sharetab = fopen(ZFS_SHARETAB, "r"); if (libzfs_core_init() != 0) { (void) close(hdl->libzfs_fd); (void) fclose(hdl->libzfs_mnttab); if (hdl->libzfs_sharetab) (void) fclose(hdl->libzfs_sharetab); free(hdl); return (NULL); } zfs_prop_init(); zpool_prop_init(); zpool_feature_init(); libzfs_mnttab_init(hdl); fletcher_4_init(); if (getenv("ZFS_PROP_DEBUG") != NULL) { hdl->libzfs_prop_debug = B_TRUE; } /* * For testing, remove some settable properties and features */ if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) { zprop_desc_t *proptbl; proptbl = zpool_prop_get_table(); proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE; proptbl = zfs_prop_get_table(); proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE; zfeature_info_t *ftbl = spa_feature_table; ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE; } return (hdl); } void libzfs_fini(libzfs_handle_t *hdl) { (void) close(hdl->libzfs_fd); if (hdl->libzfs_mnttab) #ifdef HAVE_SETMNTENT (void) endmntent(hdl->libzfs_mnttab); #else (void) fclose(hdl->libzfs_mnttab); #endif if (hdl->libzfs_sharetab) (void) fclose(hdl->libzfs_sharetab); zfs_uninit_libshare(hdl); zpool_free_handles(hdl); namespace_clear(hdl); libzfs_mnttab_fini(hdl); libzfs_core_fini(); fletcher_4_fini(); free(hdl); } libzfs_handle_t * zpool_get_handle(zpool_handle_t *zhp) { return (zhp->zpool_hdl); } libzfs_handle_t * zfs_get_handle(zfs_handle_t *zhp) { return (zhp->zfs_hdl); } zpool_handle_t * zfs_get_pool_handle(const zfs_handle_t *zhp) { return (zhp->zpool_hdl); } /* * Given a name, determine whether or not it's a valid path * (starts with '/' or "./"). If so, walk the mnttab trying * to match the device number. If not, treat the path as an * fs/vol/snap/bkmark name. */ zfs_handle_t * zfs_path_to_zhandle(libzfs_handle_t *hdl, char *path, zfs_type_t argtype) { struct stat64 statbuf; struct extmnttab entry; int ret; if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) { /* * It's not a valid path, assume it's a name of type 'argtype'. 
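* (Paths are handled below by matching the file's st_dev against mnttab entries.)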
*/ return (zfs_open(hdl, path, argtype)); } if (stat64(path, &statbuf) != 0) { (void) fprintf(stderr, "%s: %s\n", path, strerror(errno)); return (NULL); } /* Reopen MNTTAB to prevent reading stale data from open file */ if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL) return (NULL); while ((ret = getextmntent(hdl->libzfs_mnttab, &entry, 0)) == 0) { if (makedevice(entry.mnt_major, entry.mnt_minor) == statbuf.st_dev) { break; } } if (ret != 0) { return (NULL); } if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) { (void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"), path); return (NULL); } return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM)); } /* * Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from * an ioctl(). */ int zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len) { if (len == 0) len = 16 * 1024; zc->zc_nvlist_dst_size = len; zc->zc_nvlist_dst = (uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size); if (zc->zc_nvlist_dst == 0) return (-1); return (0); } /* * Called when an ioctl() which returns an nvlist fails with ENOMEM. This will * expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was * filled in by the kernel to indicate the actual required size. */ int zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc) { free((void *)(uintptr_t)zc->zc_nvlist_dst); zc->zc_nvlist_dst = (uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size); if (zc->zc_nvlist_dst == 0) return (-1); return (0); } /* * Called to free the src and dst nvlists stored in the command structure. */ void zcmd_free_nvlists(zfs_cmd_t *zc) { free((void *)(uintptr_t)zc->zc_nvlist_conf); free((void *)(uintptr_t)zc->zc_nvlist_src); free((void *)(uintptr_t)zc->zc_nvlist_dst); zc->zc_nvlist_conf = 0; zc->zc_nvlist_src = 0; zc->zc_nvlist_dst = 0; } static int zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen, nvlist_t *nvl) { char *packed; size_t len; verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0); if ((packed = zfs_alloc(hdl, len)) == NULL) return (-1); verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0); *outnv = (uint64_t)(uintptr_t)packed; *outlen = len; return (0); } int zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl) { return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf, &zc->zc_nvlist_conf_size, nvl)); } int zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl) { return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src, &zc->zc_nvlist_src_size, nvl)); } /* * Unpacks an nvlist from the ZFS ioctl command structure. */ int zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp) { if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst, zc->zc_nvlist_dst_size, nvlp, 0) != 0) return (no_memory(hdl)); return (0); } int zfs_ioctl(libzfs_handle_t *hdl, int request, zfs_cmd_t *zc) { return (ioctl(hdl->libzfs_fd, request, zc)); } /* * ================================================================ * API shared by zfs and zpool property management * ================================================================ */ static void zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type) { zprop_list_t *pl = cbp->cb_proplist; int i; char *title; size_t len; cbp->cb_first = B_FALSE; if (cbp->cb_scripted) return; /* * Start with the length of the column headers. 
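 */

/*
 * Sketch (not part of this change) of how the zcmd_*_nvlist() helpers
 * above compose: allocate a destination buffer, issue the ioctl, and
 * on ENOMEM grow the buffer to the size the kernel wrote back into
 * zc_nvlist_dst_size, then retry.  fetch_nvlist_example() and
 * ZFS_IOC_EXAMPLE are hypothetical placeholders.
 */
#include <errno.h>

static int
fetch_nvlist_example(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvp)
{
	if (zcmd_alloc_dst_nvlist(hdl, zc, 0) != 0)
		return (-1);

	while (zfs_ioctl(hdl, ZFS_IOC_EXAMPLE, zc) != 0) {
		if (errno != ENOMEM) {
			zcmd_free_nvlists(zc);
			return (-1);
		}
		/* kernel reported the required size; grow and retry */
		if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
			zcmd_free_nvlists(zc);
			return (-1);
		}
	}

	/* unpack the nvlist the kernel filled in */
	if (zcmd_read_dst_nvlist(hdl, zc, nvp) != 0) {
		zcmd_free_nvlists(zc);
		return (-1);
	}
	zcmd_free_nvlists(zc);
	return (0);
}

/*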
*/ cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME")); cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN, "PROPERTY")); cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN, "VALUE")); cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN, "RECEIVED")); cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN, "SOURCE")); /* first property is always NAME */ assert(cbp->cb_proplist->pl_prop == ((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME)); /* * Go through and calculate the widths for each column. For the * 'source' column, we kludge it up by taking the worst-case scenario of * inheriting from the longest name. This is acceptable because in the * majority of cases 'SOURCE' is the last column displayed, and we don't * use the width anyway. Note that the 'VALUE' column can be oversized, * if the name of the property is much longer than any values we find. */ for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { /* * 'PROPERTY' column */ if (pl->pl_prop != ZPROP_INVAL) { const char *propname = (type == ZFS_TYPE_POOL) ? zpool_prop_to_name(pl->pl_prop) : zfs_prop_to_name(pl->pl_prop); len = strlen(propname); if (len > cbp->cb_colwidths[GET_COL_PROPERTY]) cbp->cb_colwidths[GET_COL_PROPERTY] = len; } else { len = strlen(pl->pl_user_prop); if (len > cbp->cb_colwidths[GET_COL_PROPERTY]) cbp->cb_colwidths[GET_COL_PROPERTY] = len; } /* * 'VALUE' column. The first property is always the 'name' * property that was tacked on either by /sbin/zfs's * zfs_do_get() or when calling zprop_expand_list(), so we * ignore its width. If the user specified the name property * to display, then it will be later in the list in any case. */ if (pl != cbp->cb_proplist && pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE]) cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width; /* 'RECEIVED' column. */ if (pl != cbp->cb_proplist && pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD]) cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width; /* * 'NAME' and 'SOURCE' columns */ if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME : ZFS_PROP_NAME) && pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) { cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width; cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width + strlen(dgettext(TEXT_DOMAIN, "inherited from")); } } /* * Now go through and print the headers. */ for (i = 0; i < ZFS_GET_NCOLS; i++) { switch (cbp->cb_columns[i]) { case GET_COL_NAME: title = dgettext(TEXT_DOMAIN, "NAME"); break; case GET_COL_PROPERTY: title = dgettext(TEXT_DOMAIN, "PROPERTY"); break; case GET_COL_VALUE: title = dgettext(TEXT_DOMAIN, "VALUE"); break; case GET_COL_RECVD: title = dgettext(TEXT_DOMAIN, "RECEIVED"); break; case GET_COL_SOURCE: title = dgettext(TEXT_DOMAIN, "SOURCE"); break; default: title = NULL; } if (title != NULL) { if (i == (ZFS_GET_NCOLS - 1) || cbp->cb_columns[i + 1] == GET_COL_NONE) (void) printf("%s", title); else (void) printf("%-*s ", cbp->cb_colwidths[cbp->cb_columns[i]], title); } } (void) printf("\n"); } /* * Display a single line of output, according to the settings in the callback * structure. */ void zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp, const char *propname, const char *value, zprop_source_t sourcetype, const char *source, const char *recvd_value) { int i; const char *str = NULL; char buf[128]; /* * Ignore those source types that the user has chosen to ignore. 
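 */

/*
 * Condensed sketch of the layout approach zprop_print_headers() uses
 * above: one pass computes each column's maximum width (the headers
 * set the floor), then "%-*s" left-pads every cell.  print_two_cols()
 * is hypothetical.
 */
#include <stdio.h>
#include <string.h>

static void
print_two_cols(const char *rows[][2], int nrows)
{
	size_t w = strlen("NAME");	/* header width is the minimum */
	int i;

	for (i = 0; i < nrows; i++) {
		if (strlen(rows[i][0]) > w)
			w = strlen(rows[i][0]);
	}
	(void) printf("%-*s %s\n", (int)w, "NAME", "VALUE");
	for (i = 0; i < nrows; i++)
		(void) printf("%-*s %s\n", (int)w, rows[i][0], rows[i][1]);
}

/*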
*/ if ((sourcetype & cbp->cb_sources) == 0) return; if (cbp->cb_first) zprop_print_headers(cbp, cbp->cb_type); for (i = 0; i < ZFS_GET_NCOLS; i++) { switch (cbp->cb_columns[i]) { case GET_COL_NAME: str = name; break; case GET_COL_PROPERTY: str = propname; break; case GET_COL_VALUE: str = value; break; case GET_COL_SOURCE: switch (sourcetype) { case ZPROP_SRC_NONE: str = "-"; break; case ZPROP_SRC_DEFAULT: str = "default"; break; case ZPROP_SRC_LOCAL: str = "local"; break; case ZPROP_SRC_TEMPORARY: str = "temporary"; break; case ZPROP_SRC_INHERITED: (void) snprintf(buf, sizeof (buf), "inherited from %s", source); str = buf; break; case ZPROP_SRC_RECEIVED: str = "received"; break; default: str = NULL; assert(!"unhandled zprop_source_t"); } break; case GET_COL_RECVD: str = (recvd_value == NULL ? "-" : recvd_value); break; default: continue; } if (i == (ZFS_GET_NCOLS - 1) || cbp->cb_columns[i + 1] == GET_COL_NONE) (void) printf("%s", str); else if (cbp->cb_scripted) (void) printf("%s\t", str); else (void) printf("%-*s ", cbp->cb_colwidths[cbp->cb_columns[i]], str); } (void) printf("\n"); } /* * Given a numeric suffix, convert the value into a number of bits that the * resulting value must be shifted. */ static int str2shift(libzfs_handle_t *hdl, const char *buf) { const char *ends = "BKMGTPEZ"; int i; if (buf[0] == '\0') return (0); for (i = 0; i < strlen(ends); i++) { if (toupper(buf[0]) == ends[i]) break; } if (i == strlen(ends)) { if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid numeric suffix '%s'"), buf); return (-1); } /* * Allow 'G' = 'GB' = 'GiB', case-insensitively. * However, 'BB' and 'BiB' are disallowed. */ if (buf[1] == '\0' || (toupper(buf[0]) != 'B' && ((toupper(buf[1]) == 'B' && buf[2] == '\0') || (toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' && buf[3] == '\0')))) return (10 * i); if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid numeric suffix '%s'"), buf); return (-1); } /* * Convert a string of the form '100G' into a real number. Used when setting * properties or creating a volume. 'buf' is used to place an extended error * message for the caller to use. */ int zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num) { char *end; int shift; *num = 0; /* Check to see if this looks like a number. */ if ((value[0] < '0' || value[0] > '9') && value[0] != '.') { if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "bad numeric value '%s'"), value); return (-1); } /* Rely on strtoull() to process the numeric portion. */ errno = 0; *num = strtoull(value, &end, 10); /* * Check for ERANGE, which indicates that the value is too large to fit * in a 64-bit value. */ if (errno == ERANGE) { if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "numeric value is too large")); return (-1); } /* * If we have a decimal value, then do the computation with floating * point arithmetic. Otherwise, use standard arithmetic. 
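 */

/*
 * Worked example (illustrative only) of the suffix rule implemented
 * by str2shift() above: the suffix letter's index into "BKMGTPEZ"
 * times ten is the power-of-two shift, so 'K' -> 10, 'M' -> 20,
 * 'G' -> 30.  suffix_shift() is a hypothetical stripped-down variant
 * without the "GB"/"GiB" spelling checks or error reporting.
 */
#include <ctype.h>
#include <string.h>

static int
suffix_shift(char c)
{
	const char *ends = "BKMGTPEZ";
	const char *p;

	if (c == '\0')
		return (0);	/* no suffix means plain bytes */
	p = strchr(ends, toupper((unsigned char)c));
	return (p == NULL ? -1 : (int)(p - ends) * 10);
}

/* e.g. "100G" parses as 100ULL << suffix_shift('G'), i.e. 100 << 30 */

/*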
*/ if (*end == '.') { double fval = strtod(value, &end); if ((shift = str2shift(hdl, end)) == -1) return (-1); fval *= pow(2, shift); if (fval > UINT64_MAX) { if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "numeric value is too large")); return (-1); } *num = (uint64_t)fval; } else { if ((shift = str2shift(hdl, end)) == -1) return (-1); /* Check for overflow */ if (shift >= 64 || (*num << shift) >> shift != *num) { if (hdl) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "numeric value is too large")); return (-1); } *num <<= shift; } return (0); } /* * Given a propname=value nvpair to set, parse any numeric properties * (index, boolean, etc) if they are specified as strings and add the * resulting nvpair to the returned nvlist. * * At the DSL layer, all properties are either 64-bit numbers or strings. * We want the user to be able to ignore this fact and specify properties * as native values (numbers, for example) or as strings (to simplify * command line utilities). This also handles converting index types * (compression, checksum, etc) from strings to their on-disk index. */ int zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop, zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp, const char *errbuf) { data_type_t datatype = nvpair_type(elem); zprop_type_t proptype; const char *propname; char *value; boolean_t isnone = B_FALSE; boolean_t isauto = B_FALSE; int err = 0; if (type == ZFS_TYPE_POOL) { proptype = zpool_prop_get_type(prop); propname = zpool_prop_to_name(prop); } else { proptype = zfs_prop_get_type(prop); propname = zfs_prop_to_name(prop); } /* * Convert any properties to the internal DSL value types. */ *svalp = NULL; *ivalp = 0; switch (proptype) { case PROP_TYPE_STRING: if (datatype != DATA_TYPE_STRING) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a string"), nvpair_name(elem)); goto error; } err = nvpair_value_string(elem, svalp); if (err != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is invalid"), nvpair_name(elem)); goto error; } if (strlen(*svalp) >= ZFS_MAXPROPLEN) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is too long"), nvpair_name(elem)); goto error; } break; case PROP_TYPE_NUMBER: if (datatype == DATA_TYPE_STRING) { (void) nvpair_value_string(elem, &value); if (strcmp(value, "none") == 0) { isnone = B_TRUE; } else if (strcmp(value, "auto") == 0) { isauto = B_TRUE; } else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) { goto error; } } else if (datatype == DATA_TYPE_UINT64) { (void) nvpair_value_uint64(elem, ivalp); } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a number"), nvpair_name(elem)); goto error; } /* * Quota special: force 'none' and don't allow 0. */ if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone && (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "use 'none' to disable quota/refquota")); goto error; } /* * Special handling for "*_limit=none". In this case it's not * 0 but UINT64_MAX. */ if ((type & ZFS_TYPE_DATASET) && isnone && (prop == ZFS_PROP_FILESYSTEM_LIMIT || prop == ZFS_PROP_SNAPSHOT_LIMIT)) { *ivalp = UINT64_MAX; } /* * Special handling for setting 'refreservation' to 'auto'. Use * UINT64_MAX to tell the caller to use zfs_fix_auto_resv(). * 'auto' is only allowed on volumes. 
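 */

/*
 * Sketch of the overflow guard zfs_nicestrtonum() applies above: a
 * 64-bit left shift has overflowed exactly when shifting the result
 * back down fails to reproduce the original value.  shl_checked() is
 * hypothetical.
 */
#include <stdint.h>

static int
shl_checked(uint64_t n, int shift, uint64_t *out)
{
	if (shift >= 64 || ((n << shift) >> shift) != n)
		return (-1);	/* result would not fit in 64 bits */
	*out = n << shift;
	return (0);
}

/* shl_checked(100, 30, &v) sets v to 100 GiB in bytes. */
/* shl_checked(1ULL << 63, 1, &v) fails: the top bit would be lost. */

/*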
*/ if (isauto) { switch (prop) { case ZFS_PROP_REFRESERVATION: if ((type & ZFS_TYPE_VOLUME) == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s=auto' only allowed on " "volumes"), nvpair_name(elem)); goto error; } *ivalp = UINT64_MAX; break; default: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'auto' is invalid value for '%s'"), nvpair_name(elem)); goto error; } } break; case PROP_TYPE_INDEX: if (datatype != DATA_TYPE_STRING) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a string"), nvpair_name(elem)); goto error; } (void) nvpair_value_string(elem, &value); if (zprop_string_to_index(prop, value, ivalp, type) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be one of '%s'"), propname, zprop_values(prop, type)); goto error; } break; default: abort(); } /* * Add the result to our return set of properties. */ if (*svalp != NULL) { if (nvlist_add_string(ret, propname, *svalp) != 0) { (void) no_memory(hdl); return (-1); } } else { if (nvlist_add_uint64(ret, propname, *ivalp) != 0) { (void) no_memory(hdl); return (-1); } } return (0); error: (void) zfs_error(hdl, EZFS_BADPROP, errbuf); return (-1); } static int addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp, zfs_type_t type) { int prop; zprop_list_t *entry; prop = zprop_name_to_prop(propname, type); if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE)) prop = ZPROP_INVAL; /* * When no property table entry can be found, return failure if * this is a pool property or if this isn't a user-defined * dataset property, */ if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL && !zpool_prop_feature(propname) && !zpool_prop_unsupported(propname)) || (type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) && !zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property '%s'"), propname); return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, "bad property list"))); } if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL) return (-1); entry->pl_prop = prop; if (prop == ZPROP_INVAL) { if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) == NULL) { free(entry); return (-1); } entry->pl_width = strlen(propname); } else { entry->pl_width = zprop_width(prop, &entry->pl_fixed, type); } *listp = entry; return (0); } /* * Given a comma-separated list of properties, construct a property list * containing both user-defined and native properties. This function will * return a NULL list if 'all' is specified, which can later be expanded * by zprop_expand_list(). */ int zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp, zfs_type_t type) { *listp = NULL; /* * If 'all' is specified, return a NULL list. */ if (strcmp(props, "all") == 0) return (0); /* * If no props were specified, return an error. */ if (props[0] == '\0') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no properties specified")); return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, "bad property list"))); } /* * It would be nice to use getsubopt() here, but the inclusion of column * aliases makes this more effort than it's worth. */ while (*props != '\0') { size_t len; char *p; char c; if ((p = strchr(props, ',')) == NULL) { len = strlen(props); p = props + len; } else { len = p - props; } /* * Check for empty options. */ if (len == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "empty property name")); return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, "bad property list"))); } /* * Check all regular property names. 
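 */

/*
 * Usage sketch for the property-list helpers above (assumption: a
 * valid libzfs_handle_t).  Note that zprop_get_list() tokenizes the
 * spec in place, so it must be given a writable string, and the
 * resulting list is always released with zprop_free_list().
 */
static int
show_props_example(libzfs_handle_t *hdl)
{
	char spec[] = "name,used,avail";	/* writable, comma-separated */
	zprop_list_t *pl = NULL;
	zprop_list_t *e;

	if (zprop_get_list(hdl, spec, &pl, ZFS_TYPE_DATASET) != 0)
		return (-1);
	for (e = pl; e != NULL; e = e->pl_next) {
		/* e->pl_prop or, for user properties, e->pl_user_prop */
	}
	zprop_free_list(pl);
	return (0);
}

/*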
*/ c = props[len]; props[len] = '\0'; if (strcmp(props, "space") == 0) { static char *spaceprops[] = { "name", "avail", "used", "usedbysnapshots", "usedbydataset", "usedbyrefreservation", "usedbychildren", NULL }; int i; for (i = 0; spaceprops[i]; i++) { if (addlist(hdl, spaceprops[i], listp, type)) return (-1); listp = &(*listp)->pl_next; } } else { if (addlist(hdl, props, listp, type)) return (-1); listp = &(*listp)->pl_next; } props = p; if (c == ',') props++; } return (0); } void zprop_free_list(zprop_list_t *pl) { zprop_list_t *next; while (pl != NULL) { next = pl->pl_next; free(pl->pl_user_prop); free(pl); pl = next; } } typedef struct expand_data { zprop_list_t **last; libzfs_handle_t *hdl; zfs_type_t type; } expand_data_t; int zprop_expand_list_cb(int prop, void *cb) { zprop_list_t *entry; expand_data_t *edp = cb; if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL) return (ZPROP_INVAL); entry->pl_prop = prop; entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type); entry->pl_all = B_TRUE; *(edp->last) = entry; edp->last = &entry->pl_next; return (ZPROP_CONT); } int zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type) { zprop_list_t *entry; zprop_list_t **last; expand_data_t exp; if (*plp == NULL) { /* * If this is the very first time we've been called for an 'all' * specification, expand the list to include all native * properties. */ last = plp; exp.last = last; exp.hdl = hdl; exp.type = type; if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE, B_FALSE, type) == ZPROP_INVAL) return (-1); /* * Add 'name' to the beginning of the list, which is handled * specially. */ if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL) return (-1); entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME; entry->pl_width = zprop_width(entry->pl_prop, &entry->pl_fixed, type); entry->pl_all = B_TRUE; entry->pl_next = *plp; *plp = entry; } return (0); } int zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered, zfs_type_t type) { return (zprop_iter_common(func, cb, show_all, ordered, type)); } diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index a8304c18df28..fa80e3a6181a 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -1,3036 +1,3062 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018 by Delphix. All rights reserved. * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. * Copyright (c) 2013, Joyent, Inc. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright (c) 2015, STRATO AG, Inc. All rights reserved. * Copyright (c) 2016 Actifio, Inc. All rights reserved. 
* Copyright 2017 Nexenta Systems, Inc. * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. + * Copyright (c) 2018, loli10K . All rights reserved. */ /* Portions Copyright 2010 Robert Milkowski */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" /* * Needed to close a window in dnode_move() that allows the objset to be freed * before it can be safely accessed. */ krwlock_t os_lock; /* * Tunable to overwrite the maximum number of threads for the parallelization * of dmu_objset_find_dp, needed to speed up the import of pools with many * datasets. * Default is 4 times the number of leaf vdevs. */ int dmu_find_threads = 0; /* * Backfill lower metadnode objects after this many have been freed. * Backfilling negatively impacts object creation rates, so only do it * if there are enough holes to fill. */ int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT; static char *upgrade_tag = "upgrade_tag"; static void dmu_objset_find_dp_cb(void *arg); static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb); static void dmu_objset_upgrade_stop(objset_t *os); void dmu_objset_init(void) { rw_init(&os_lock, NULL, RW_DEFAULT, NULL); } void dmu_objset_fini(void) { rw_destroy(&os_lock); } spa_t * dmu_objset_spa(objset_t *os) { return (os->os_spa); } zilog_t * dmu_objset_zil(objset_t *os) { return (os->os_zil); } dsl_pool_t * dmu_objset_pool(objset_t *os) { dsl_dataset_t *ds; if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir) return (ds->ds_dir->dd_pool); else return (spa_get_dsl(os->os_spa)); } dsl_dataset_t * dmu_objset_ds(objset_t *os) { return (os->os_dsl_dataset); } dmu_objset_type_t dmu_objset_type(objset_t *os) { return (os->os_phys->os_type); } void dmu_objset_name(objset_t *os, char *buf) { dsl_dataset_name(os->os_dsl_dataset, buf); } uint64_t dmu_objset_id(objset_t *os) { dsl_dataset_t *ds = os->os_dsl_dataset; return (ds ? ds->ds_object : 0); } uint64_t dmu_objset_dnodesize(objset_t *os) { return (os->os_dnodesize); } zfs_sync_type_t dmu_objset_syncprop(objset_t *os) { return (os->os_sync); } zfs_logbias_op_t dmu_objset_logbias(objset_t *os) { return (os->os_logbias); } static void checksum_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance should have been done by now. */ ASSERT(newval != ZIO_CHECKSUM_INHERIT); os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE); } static void compression_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval != ZIO_COMPRESS_INHERIT); os->os_compress = zio_compress_select(os->os_spa, newval, ZIO_COMPRESS_ON); } static void copies_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval > 0); ASSERT(newval <= spa_max_replication(os->os_spa)); os->os_copies = newval; } static void dedup_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; spa_t *spa = os->os_spa; enum zio_checksum checksum; /* * Inheritance should have been done by now. 
*/ ASSERT(newval != ZIO_CHECKSUM_INHERIT); checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF); os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK; os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY); } static void primary_cache_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || newval == ZFS_CACHE_METADATA); os->os_primary_cache = newval; } static void secondary_cache_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || newval == ZFS_CACHE_METADATA); os->os_secondary_cache = newval; } static void sync_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS || newval == ZFS_SYNC_DISABLED); os->os_sync = newval; if (os->os_zil) zil_set_sync(os->os_zil, newval); } static void redundant_metadata_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL || newval == ZFS_REDUNDANT_METADATA_MOST); os->os_redundant_metadata = newval; } static void dnodesize_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; switch (newval) { case ZFS_DNSIZE_LEGACY: os->os_dnodesize = DNODE_MIN_SIZE; break; case ZFS_DNSIZE_AUTO: /* * Choose a dnode size that will work well for most * workloads if the user specified "auto". Future code * improvements could dynamically select a dnode size * based on observed workload patterns. */ os->os_dnodesize = DNODE_MIN_SIZE * 2; break; case ZFS_DNSIZE_1K: case ZFS_DNSIZE_2K: case ZFS_DNSIZE_4K: case ZFS_DNSIZE_8K: case ZFS_DNSIZE_16K: os->os_dnodesize = newval; break; } } static void smallblk_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval <= SPA_OLD_MAXBLOCKSIZE); ASSERT(ISP2(newval)); os->os_zpl_special_smallblock = newval; } static void logbias_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; ASSERT(newval == ZFS_LOGBIAS_LATENCY || newval == ZFS_LOGBIAS_THROUGHPUT); os->os_logbias = newval; if (os->os_zil) zil_set_logbias(os->os_zil, newval); } static void recordsize_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; os->os_recordsize = newval; } void dmu_objset_byteswap(void *buf, size_t size) { objset_phys_t *osp = buf; ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 || size == sizeof (objset_phys_t)); dnode_byteswap(&osp->os_meta_dnode); byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t)); osp->os_type = BSWAP_64(osp->os_type); osp->os_flags = BSWAP_64(osp->os_flags); if (size >= OBJSET_PHYS_SIZE_V2) { dnode_byteswap(&osp->os_userused_dnode); dnode_byteswap(&osp->os_groupused_dnode); if (size >= sizeof (objset_phys_t)) dnode_byteswap(&osp->os_projectused_dnode); } } /* * The hash is a CRC-based hash of the objset_t pointer and the object number. */ static uint64_t dnode_hash(const objset_t *os, uint64_t obj) { uintptr_t osv = (uintptr_t)os; uint64_t crc = -1ULL; ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); /* * The low 6 bits of the pointer don't have much entropy, because * the objset_t is larger than 2^6 bytes long. 
*/ crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF]; crc ^= (osv>>14) ^ (obj>>24); return (crc); } unsigned int dnode_multilist_index_func(multilist_t *ml, void *obj) { dnode_t *dn = obj; return (dnode_hash(dn->dn_objset, dn->dn_object) % multilist_get_num_sublists(ml)); } /* * Instantiates the objset_t in-memory structure corresponding to the * objset_phys_t that's pointed to by the specified blkptr_t. */ int dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, objset_t **osp) { objset_t *os; int i, err; ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock)); /* * The $ORIGIN dataset (if it exists) doesn't have an associated * objset, so there's no reason to open it. The $ORIGIN dataset * will not exist on pools older than SPA_VERSION_ORIGIN. */ if (ds != NULL && spa_get_dsl(spa) != NULL && spa_get_dsl(spa)->dp_origin_snap != NULL) { ASSERT3P(ds->ds_dir, !=, spa_get_dsl(spa)->dp_origin_snap->ds_dir); } os = kmem_zalloc(sizeof (objset_t), KM_SLEEP); os->os_dsl_dataset = ds; os->os_spa = spa; os->os_rootbp = bp; if (!BP_IS_HOLE(os->os_rootbp)) { arc_flags_t aflags = ARC_FLAG_WAIT; zbookmark_phys_t zb; int size; enum zio_flag zio_flags = ZIO_FLAG_CANFAIL; SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); if (DMU_OS_IS_L2CACHEABLE(os)) aflags |= ARC_FLAG_L2CACHE; if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) { ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); ASSERT(BP_IS_AUTHENTICATED(bp)); zio_flags |= ZIO_FLAG_RAW; } dprintf_bp(os->os_rootbp, "reading %s", ""); err = arc_read(NULL, spa, os->os_rootbp, arc_getbuf_func, &os->os_phys_buf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb); if (err != 0) { kmem_free(os, sizeof (objset_t)); /* convert checksum errors into IO errors */ if (err == ECKSUM) err = SET_ERROR(EIO); return (err); } if (spa_version(spa) < SPA_VERSION_USERSPACE) size = OBJSET_PHYS_SIZE_V1; else if (!spa_feature_is_enabled(spa, SPA_FEATURE_PROJECT_QUOTA)) size = OBJSET_PHYS_SIZE_V2; else size = sizeof (objset_phys_t); /* Increase the blocksize if we are permitted. */ if (arc_buf_size(os->os_phys_buf) < size) { arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf, ARC_BUFC_METADATA, size); bzero(buf->b_data, size); bcopy(os->os_phys_buf->b_data, buf->b_data, arc_buf_size(os->os_phys_buf)); arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); os->os_phys_buf = buf; } os->os_phys = os->os_phys_buf->b_data; os->os_flags = os->os_phys->os_flags; } else { int size = spa_version(spa) >= SPA_VERSION_USERSPACE ? sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1; os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf, ARC_BUFC_METADATA, size); os->os_phys = os->os_phys_buf->b_data; bzero(os->os_phys, size); } /* * These properties will be filled in by the logic in zfs_get_zplprop() * when they are queried for the first time. */ os->os_version = OBJSET_PROP_UNINITIALIZED; os->os_normalization = OBJSET_PROP_UNINITIALIZED; os->os_utf8only = OBJSET_PROP_UNINITIALIZED; os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED; /* * Note: the changed_cb will be called once before the register * func returns, thus changing the checksum/compression from the * default (fletcher2/off). Snapshots don't need to know about * checksum/compression/copies. 
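 */

/*
 * The registration code below strings a dozen dsl_prop_register()
 * calls together with "if (err == 0)" so that the first failure
 * short-circuits the rest and a single cleanup path runs.  A
 * condensed sketch of the same idiom with hypothetical names:
 */
static int
register_all(void *arg, int (*register_cb)(void *, int),
    const int *props, int nprops)
{
	int err = 0;
	int i;

	for (i = 0; err == 0 && i < nprops; i++)
		err = register_cb(arg, props[i]);	/* stop on first error */

	return (err);	/* caller unwinds all registrations on failure */
}

/*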
*/ if (ds != NULL) { boolean_t needlock = B_FALSE; os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0); /* * Note: it's valid to open the objset if the dataset is * long-held, in which case the pool_config lock will not * be held. */ if (!dsl_pool_config_held(dmu_objset_pool(os))) { needlock = B_TRUE; dsl_pool_config_enter(dmu_objset_pool(os), FTAG); } err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE), primary_cache_changed_cb, os); if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE), secondary_cache_changed_cb, os); } if (!ds->ds_is_snapshot) { if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COMPRESSION), compression_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COPIES), copies_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DEDUP), dedup_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_LOGBIAS), logbias_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SYNC), sync_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name( ZFS_PROP_REDUNDANT_METADATA), redundant_metadata_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE), recordsize_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DNODESIZE), dnodesize_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name( ZFS_PROP_SPECIAL_SMALL_BLOCKS), smallblk_changed_cb, os); } } if (needlock) dsl_pool_config_exit(dmu_objset_pool(os), FTAG); if (err != 0) { arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); kmem_free(os, sizeof (objset_t)); return (err); } } else { /* It's the meta-objset. 
*/ os->os_checksum = ZIO_CHECKSUM_FLETCHER_4; os->os_compress = ZIO_COMPRESS_ON; os->os_encrypted = B_FALSE; os->os_copies = spa_max_replication(spa); os->os_dedup_checksum = ZIO_CHECKSUM_OFF; os->os_dedup_verify = B_FALSE; os->os_logbias = ZFS_LOGBIAS_LATENCY; os->os_sync = ZFS_SYNC_STANDARD; os->os_primary_cache = ZFS_CACHE_ALL; os->os_secondary_cache = ZFS_CACHE_ALL; os->os_dnodesize = DNODE_MIN_SIZE; } if (ds == NULL || !ds->ds_is_snapshot) os->os_zil_header = os->os_phys->os_zil_header; os->os_zil = zil_alloc(os, &os->os_zil_header); for (i = 0; i < TXG_SIZE; i++) { os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[i]), dnode_multilist_index_func); } list_create(&os->os_dnodes, sizeof (dnode_t), offsetof(dnode_t, dn_link)); list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t), offsetof(dmu_buf_impl_t, db_link)); list_link_init(&os->os_evicting_node); mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL); os->os_obj_next_percpu_len = boot_ncpus; os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]), KM_SLEEP); dnode_special_open(os, &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT, &os->os_meta_dnode); if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) { dnode_special_open(os, &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT, &os->os_userused_dnode); dnode_special_open(os, &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode); if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf)) dnode_special_open(os, &os->os_phys->os_projectused_dnode, DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode); } mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL); *osp = os; return (0); } int dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp) { int err = 0; /* * We shouldn't be doing anything with dsl_dataset_t's unless the * pool_config lock is held, or the dataset is long-held. */ ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) || dsl_dataset_long_held(ds)); mutex_enter(&ds->ds_opening_lock); if (ds->ds_objset == NULL) { objset_t *os; rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); err = dmu_objset_open_impl(dsl_dataset_get_spa(ds), ds, dsl_dataset_get_blkptr(ds), &os); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (err == 0) { mutex_enter(&ds->ds_lock); ASSERT(ds->ds_objset == NULL); ds->ds_objset = os; mutex_exit(&ds->ds_lock); } } *osp = ds->ds_objset; mutex_exit(&ds->ds_opening_lock); return (err); } /* * Holds the pool while the objset is held. Therefore only one objset * can be held at a time. */ int dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag, objset_t **osp) { dsl_pool_t *dp; dsl_dataset_t *ds; int err; ds_hold_flags_t flags = (decrypt) ? 
DS_HOLD_FLAG_DECRYPT : 0; err = dsl_pool_hold(name, tag, &dp); if (err != 0) return (err); err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds); if (err != 0) { dsl_pool_rele(dp, tag); return (err); } err = dmu_objset_from_ds(ds, osp); if (err != 0) { dsl_dataset_rele(ds, tag); dsl_pool_rele(dp, tag); } return (err); } int dmu_objset_hold(const char *name, void *tag, objset_t **osp) { return (dmu_objset_hold_flags(name, B_FALSE, tag, osp)); } static int dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type, boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp) { int err; err = dmu_objset_from_ds(ds, osp); if (err != 0) { return (err); } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) { return (SET_ERROR(EINVAL)); } else if (!readonly && dsl_dataset_is_snapshot(ds)) { return (SET_ERROR(EROFS)); } else if (!readonly && decrypt && dsl_dir_incompatible_encryption_version(ds->ds_dir)) { return (SET_ERROR(EROFS)); } /* if we are decrypting, we can now check MACs in os->os_phys_buf */ if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) { zbookmark_phys_t zb; SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa, &zb, B_FALSE); if (err != 0) return (err); ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf)); } return (0); } /* * dsl_pool must not be held when this is called. * Upon successful return, there will be a longhold on the dataset, * and the dsl_pool will not be held. */ int dmu_objset_own(const char *name, dmu_objset_type_t type, boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp) { dsl_pool_t *dp; dsl_dataset_t *ds; int err; ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0; err = dsl_pool_hold(name, FTAG, &dp); if (err != 0) return (err); err = dsl_dataset_own(dp, name, flags, tag, &ds); if (err != 0) { dsl_pool_rele(dp, FTAG); return (err); } err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp); if (err != 0) { dsl_dataset_disown(ds, flags, tag); dsl_pool_rele(dp, FTAG); return (err); } /* * User accounting requires the dataset to be decrypted and rw. * We also don't begin user accounting during claiming to help * speed up pool import times and to keep this txg reserved * completely for recovery work. */ if ((dmu_objset_userobjspace_upgradable(*osp) || dmu_objset_projectquota_upgradable(*osp)) && !readonly && !dp->dp_spa->spa_claiming && (ds->ds_dir->dd_crypto_obj == 0 || decrypt)) dmu_objset_id_quota_upgrade(*osp); dsl_pool_rele(dp, FTAG); return (0); } int dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type, boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp) { dsl_dataset_t *ds; int err; ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0; err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds); if (err != 0) return (err); err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp); if (err != 0) { dsl_dataset_disown(ds, flags, tag); return (err); } return (0); } void dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag) { ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0; dsl_pool_t *dp = dmu_objset_pool(os); dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag); dsl_pool_rele(dp, tag); } void dmu_objset_rele(objset_t *os, void *tag) { dmu_objset_rele_flags(os, B_FALSE, tag); } /* * When we are called, os MUST refer to an objset associated with a dataset * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner * == tag. 
We will then release and reacquire ownership of the dataset while * holding the pool config_rwlock to avoid intervening namespace or ownership * changes may occur. * * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to * release the hold on its dataset and acquire a new one on the dataset of the * same name so that it can be partially torn down and reconstructed. */ void dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds, boolean_t decrypt, void *tag) { dsl_pool_t *dp; char name[ZFS_MAX_DATASET_NAME_LEN]; VERIFY3P(ds, !=, NULL); VERIFY3P(ds->ds_owner, ==, tag); VERIFY(dsl_dataset_long_held(ds)); dsl_dataset_name(ds, name); dp = ds->ds_dir->dd_pool; dsl_pool_config_enter(dp, FTAG); dsl_dataset_disown(ds, decrypt, tag); VERIFY0(dsl_dataset_own(dp, name, (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag, newds)); dsl_pool_config_exit(dp, FTAG); } void dmu_objset_disown(objset_t *os, boolean_t decrypt, void *tag) { /* * Stop upgrading thread */ dmu_objset_upgrade_stop(os); dsl_dataset_disown(os->os_dsl_dataset, (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag); } void dmu_objset_evict_dbufs(objset_t *os) { dnode_t *dn_marker; dnode_t *dn; dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP); mutex_enter(&os->os_lock); dn = list_head(&os->os_dnodes); while (dn != NULL) { /* * Skip dnodes without holds. We have to do this dance * because dnode_add_ref() only works if there is already a * hold. If the dnode has no holds, then it has no dbufs. */ if (dnode_add_ref(dn, FTAG)) { list_insert_after(&os->os_dnodes, dn, dn_marker); mutex_exit(&os->os_lock); dnode_evict_dbufs(dn); dnode_rele(dn, FTAG); mutex_enter(&os->os_lock); dn = list_next(&os->os_dnodes, dn_marker); list_remove(&os->os_dnodes, dn_marker); } else { dn = list_next(&os->os_dnodes, dn); } } mutex_exit(&os->os_lock); kmem_free(dn_marker, sizeof (dnode_t)); if (DMU_USERUSED_DNODE(os) != NULL) { if (DMU_PROJECTUSED_DNODE(os) != NULL) dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os)); dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os)); dnode_evict_dbufs(DMU_USERUSED_DNODE(os)); } dnode_evict_dbufs(DMU_META_DNODE(os)); } /* * Objset eviction processing is split into into two pieces. * The first marks the objset as evicting, evicts any dbufs that * have a refcount of zero, and then queues up the objset for the * second phase of eviction. Once os->os_dnodes has been cleared by * dnode_buf_pageout()->dnode_destroy(), the second phase is executed. * The second phase closes the special dnodes, dequeues the objset from * the list of those undergoing eviction, and finally frees the objset. * * NOTE: Due to asynchronous eviction processing (invocation of * dnode_buf_pageout()), it is possible for the meta dnode for the * objset to have no holds even though os->os_dnodes is not empty. 
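 */

/*
 * Sketch (illustrative, not part of this change) of the marker-node
 * technique dmu_objset_evict_dbufs() uses above: to drop the list
 * lock around slow per-element work, insert a stack-allocated marker
 * after the current element, release the lock, do the work, then
 * retake the lock and resume from the marker.  The real code also
 * takes a hold on the element before dropping the lock so it cannot
 * be freed mid-walk.  node_t and do_slow_work() are hypothetical;
 * list_t/kmutex_t are the usual SPL primitives.
 */
typedef struct node {
	list_node_t n_link;
} node_t;

static void
walk_with_marker(list_t *lst, kmutex_t *lock, void (*do_slow_work)(node_t *))
{
	node_t marker;	/* never dereferenced, only holds a position */
	node_t *n;

	mutex_enter(lock);
	n = list_head(lst);
	while (n != NULL) {
		list_insert_after(lst, n, &marker);
		mutex_exit(lock);	/* marker pins our place in the list */
		do_slow_work(n);	/* may sleep or take other locks */
		mutex_enter(lock);
		n = list_next(lst, &marker);
		list_remove(lst, &marker);
	}
	mutex_exit(lock);
}

/*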
*/ void dmu_objset_evict(objset_t *os) { dsl_dataset_t *ds = os->os_dsl_dataset; for (int t = 0; t < TXG_SIZE; t++) ASSERT(!dmu_objset_is_dirty(os, t)); if (ds) dsl_prop_unregister_all(ds, os); if (os->os_sa) sa_tear_down(os); dmu_objset_evict_dbufs(os); mutex_enter(&os->os_lock); spa_evicting_os_register(os->os_spa, os); if (list_is_empty(&os->os_dnodes)) { mutex_exit(&os->os_lock); dmu_objset_evict_done(os); } else { mutex_exit(&os->os_lock); } } void dmu_objset_evict_done(objset_t *os) { ASSERT3P(list_head(&os->os_dnodes), ==, NULL); dnode_special_close(&os->os_meta_dnode); if (DMU_USERUSED_DNODE(os)) { if (DMU_PROJECTUSED_DNODE(os)) dnode_special_close(&os->os_projectused_dnode); dnode_special_close(&os->os_userused_dnode); dnode_special_close(&os->os_groupused_dnode); } zil_free(os->os_zil); arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); /* * This is a barrier to prevent the objset from going away in * dnode_move() until we can safely ensure that the objset is still in * use. We consider the objset valid before the barrier and invalid * after the barrier. */ rw_enter(&os_lock, RW_READER); rw_exit(&os_lock); kmem_free(os->os_obj_next_percpu, os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0])); mutex_destroy(&os->os_lock); mutex_destroy(&os->os_userused_lock); mutex_destroy(&os->os_obj_lock); mutex_destroy(&os->os_user_ptr_lock); mutex_destroy(&os->os_upgrade_lock); for (int i = 0; i < TXG_SIZE; i++) { multilist_destroy(os->os_dirty_dnodes[i]); } spa_evicting_os_deregister(os->os_spa, os); kmem_free(os, sizeof (objset_t)); } inode_timespec_t dmu_objset_snap_cmtime(objset_t *os) { return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir)); } objset_t * dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx) { objset_t *os; dnode_t *mdn; ASSERT(dmu_tx_is_syncing(tx)); if (blksz == 0) blksz = DNODE_BLOCK_SIZE; if (ibs == 0) ibs = DN_MAX_INDBLKSHIFT; if (ds != NULL) VERIFY0(dmu_objset_from_ds(ds, &os)); else VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os)); mdn = DMU_META_DNODE(os); dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0, DNODE_MIN_SLOTS, tx); /* * We don't want to have to increase the meta-dnode's nlevels * later, because then we could do it in quescing context while * we are also accessing it in open context. * * This precaution is not necessary for the MOS (ds == NULL), * because the MOS is only updated in syncing context. * This is most fortunate: the MOS is the only objset that * needs to be synced multiple times as spa_sync() iterates * to convergence, so minimizing its dn_nlevels matters. */ if (ds != NULL) { if (levels == 0) { levels = 1; /* * Determine the number of levels necessary for the * meta-dnode to contain DN_MAX_OBJECT dnodes. Note * that in order to ensure that we do not overflow * 64 bits, there has to be a nlevels that gives us a * number of blocks > DN_MAX_OBJECT but < 2^64. * Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) * (10) must be less than (64 - log2(DN_MAX_OBJECT)) * (16). */ while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) < DN_MAX_OBJECT) levels++; } mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = mdn->dn_nlevels = levels; } ASSERT(type != DMU_OST_NONE); ASSERT(type != DMU_OST_ANY); ASSERT(type < DMU_OST_NUMTYPES); os->os_phys->os_type = type; /* * Enable user accounting if it is enabled and this is not an * encrypted receive. 
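 */

/*
 * Worked sketch of the nlevels loop above: with 512-byte dnodes
 * (DNODE_SHIFT == 9), 16K dnode blocks (datablkshift == 14), three
 * block pointers in the meta-dnode, and 128K indirect blocks holding
 * 1024 block pointers each (indblkshift - SPA_BLKPTRSHIFT == 10),
 * every extra level multiplies the addressable dnodes by 1024 until
 * DN_MAX_OBJECT (2^48) is covered.  The constants here are
 * illustrative defaults, not values read from a live dnode.
 */
#include <stdint.h>

static int
meta_dnode_levels(void)
{
	const int nblkptr = 3;			/* mdn->dn_nblkptr */
	const int datablkshift = 14;		/* 16K dnode blocks */
	const int epbs = 10;			/* 1024 bps per indirect */
	const int dnode_shift = 9;		/* 512-byte dnodes */
	const uint64_t dn_max_object = 1ULL << 48;
	int levels = 1;

	while (((uint64_t)nblkptr <<
	    (datablkshift - dnode_shift + (levels - 1) * epbs)) <
	    dn_max_object)
		levels++;

	return (levels);	/* 6 with these defaults */
}

/*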
*/ if (dmu_objset_userused_enabled(os) && (!os->os_encrypted || !dmu_objset_is_receiving(os))) { os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; if (dmu_objset_userobjused_enabled(os)) { ds->ds_feature_activation[ SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE; os->os_phys->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; } if (dmu_objset_projectquota_enabled(os)) { ds->ds_feature_activation[ SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE; os->os_phys->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE; } os->os_flags = os->os_phys->os_flags; } dsl_dataset_dirty(ds, tx); return (os); } /* called from dsl for meta-objset */ objset_t * dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, dmu_objset_type_t type, dmu_tx_t *tx) { return (dmu_objset_create_impl_dnstats(spa, ds, bp, type, 0, 0, 0, tx)); } typedef struct dmu_objset_create_arg { const char *doca_name; cred_t *doca_cred; void (*doca_userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); void *doca_userarg; dmu_objset_type_t doca_type; uint64_t doca_flags; dsl_crypto_params_t *doca_dcp; } dmu_objset_create_arg_t; /*ARGSUSED*/ static int dmu_objset_create_check(void *arg, dmu_tx_t *tx) { dmu_objset_create_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *pdd; + dsl_dataset_t *parentds; + objset_t *parentos; const char *tail; int error; if (strchr(doca->doca_name, '@') != NULL) return (SET_ERROR(EINVAL)); if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); if (dataset_nestcheck(doca->doca_name) != 0) return (SET_ERROR(ENAMETOOLONG)); error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail); if (error != 0) return (error); if (tail == NULL) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EEXIST)); } error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp, NULL); if (error != 0) { dsl_dir_rele(pdd, FTAG); return (error); } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, doca->doca_cred); + if (error != 0) { + dsl_dir_rele(pdd, FTAG); + return (error); + } + /* can't create below anything but filesystems (eg. 
no ZVOLs) */ + error = dsl_dataset_hold_obj(pdd->dd_pool, + dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &parentds); + if (error != 0) { + dsl_dir_rele(pdd, FTAG); + return (error); + } + error = dmu_objset_from_ds(parentds, &parentos); + if (error != 0) { + dsl_dataset_rele(parentds, FTAG); + dsl_dir_rele(pdd, FTAG); + return (error); + } + if (dmu_objset_type(parentos) != DMU_OST_ZFS) { + dsl_dataset_rele(parentds, FTAG); + dsl_dir_rele(pdd, FTAG); + return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); + } + dsl_dataset_rele(parentds, FTAG); dsl_dir_rele(pdd, FTAG); return (error); } static void dmu_objset_create_sync(void *arg, dmu_tx_t *tx) { dmu_objset_create_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); spa_t *spa = dp->dp_spa; dsl_dir_t *pdd; const char *tail; dsl_dataset_t *ds; uint64_t obj; blkptr_t *bp; objset_t *os; zio_t *rzio; VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail)); obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags, doca->doca_cred, doca->doca_dcp, tx); VERIFY0(dsl_dataset_hold_obj_flags(pdd->dd_pool, obj, DS_HOLD_FLAG_DECRYPT, FTAG, &ds)); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); bp = dsl_dataset_get_blkptr(ds); os = dmu_objset_create_impl(spa, ds, bp, doca->doca_type, tx); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (doca->doca_userfunc != NULL) { doca->doca_userfunc(os, doca->doca_userarg, doca->doca_cred, tx); } /* * The doca_userfunc() may write out some data that needs to be * encrypted if the dataset is encrypted (specifically the root * directory). This data must be written out before the encryption * key mapping is removed by dsl_dataset_rele_flags(). Force the * I/O to occur immediately by invoking the relevant sections of * dsl_pool_sync(). */ if (os->os_encrypted) { dsl_dataset_t *tmpds = NULL; boolean_t need_sync_done = B_FALSE; mutex_enter(&ds->ds_lock); ds->ds_owner = FTAG; mutex_exit(&ds->ds_lock); rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, tx->tx_txg); if (tmpds != NULL) { dsl_dataset_sync(ds, rzio, tx); need_sync_done = B_TRUE; } VERIFY0(zio_wait(rzio)); dmu_objset_do_userquota_updates(os, tx); taskq_wait(dp->dp_sync_taskq); if (txg_list_member(&dp->dp_dirty_datasets, ds, tx->tx_txg)) { ASSERT3P(ds->ds_key_mapping, !=, NULL); key_mapping_rele(spa, ds->ds_key_mapping, ds); } rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, tx->tx_txg); if (tmpds != NULL) { dmu_buf_rele(ds->ds_dbuf, ds); dsl_dataset_sync(ds, rzio, tx); } VERIFY0(zio_wait(rzio)); if (need_sync_done) { ASSERT3P(ds->ds_key_mapping, !=, NULL); key_mapping_rele(spa, ds->ds_key_mapping, ds); dsl_dataset_sync_done(ds, tx); } mutex_enter(&ds->ds_lock); ds->ds_owner = NULL; mutex_exit(&ds->ds_lock); } spa_history_log_internal_ds(ds, "create", tx, ""); zvol_create_minors(spa, doca->doca_name, B_TRUE); dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); dsl_dir_rele(pdd, FTAG); } int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg) { dmu_objset_create_arg_t doca; dsl_crypto_params_t tmp_dcp = { 0 }; doca.doca_name = name; doca.doca_cred = CRED(); doca.doca_flags = flags; doca.doca_userfunc = func; doca.doca_userarg = arg; doca.doca_type = type; /* * Some callers (mostly for testing) do not provide a dcp on their * own but various code inside the sync task will require it to be * allocated. 
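 */

/*
 * The ZFS_ERR_WRONG_PARENT check added to dmu_objset_create_check()
 * above, restated as a hypothetical standalone helper for clarity:
 * hold the parent dir's head dataset, look at its objset type, and
 * refuse to create anything beneath a non-filesystem (e.g. a zvol).
 */
static int
parent_is_filesystem(dsl_dir_t *pdd)
{
	dsl_dataset_t *pds;
	objset_t *pos;
	int err;

	err = dsl_dataset_hold_obj(pdd->dd_pool,
	    dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &pds);
	if (err != 0)
		return (err);
	err = dmu_objset_from_ds(pds, &pos);
	if (err == 0 && dmu_objset_type(pos) != DMU_OST_ZFS)
		err = SET_ERROR(ZFS_ERR_WRONG_PARENT);
	dsl_dataset_rele(pds, FTAG);
	return (err);
}

/*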
Rather than adding NULL checks throughout this code * or adding dummy dcp's to all of the callers we simply create a * dummy one here and use that. This zero dcp will have the same * effect as asking for inheritance of all encryption params. */ doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp; return (dsl_sync_task(name, dmu_objset_create_check, dmu_objset_create_sync, &doca, 6, ZFS_SPACE_CHECK_NORMAL)); } typedef struct dmu_objset_clone_arg { const char *doca_clone; const char *doca_origin; cred_t *doca_cred; } dmu_objset_clone_arg_t; /*ARGSUSED*/ static int dmu_objset_clone_check(void *arg, dmu_tx_t *tx) { dmu_objset_clone_arg_t *doca = arg; dsl_dir_t *pdd; const char *tail; int error; dsl_dataset_t *origin; dsl_pool_t *dp = dmu_tx_pool(tx); if (strchr(doca->doca_clone, '@') != NULL) return (SET_ERROR(EINVAL)); if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail); if (error != 0) return (error); if (tail == NULL) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EEXIST)); } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, doca->doca_cred); if (error != 0) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EDQUOT)); } error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin); if (error != 0) { dsl_dir_rele(pdd, FTAG); return (error); } /* You can only clone snapshots, not the head datasets. */ if (!origin->ds_is_snapshot) { dsl_dataset_rele(origin, FTAG); dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EINVAL)); } error = dmu_objset_clone_crypt_check(pdd, origin->ds_dir); if (error != 0) { dsl_dataset_rele(origin, FTAG); dsl_dir_rele(pdd, FTAG); return (error); } dsl_dataset_rele(origin, FTAG); dsl_dir_rele(pdd, FTAG); return (0); } static void dmu_objset_clone_sync(void *arg, dmu_tx_t *tx) { dmu_objset_clone_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *pdd; const char *tail; dsl_dataset_t *origin, *ds; uint64_t obj; char namebuf[ZFS_MAX_DATASET_NAME_LEN]; VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail)); VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin)); obj = dsl_dataset_create_sync(pdd, tail, origin, 0, doca->doca_cred, NULL, tx); VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); dsl_dataset_name(origin, namebuf); spa_history_log_internal_ds(ds, "clone", tx, "origin=%s (%llu)", namebuf, origin->ds_object); zvol_create_minors(dp->dp_spa, doca->doca_clone, B_TRUE); dsl_dataset_rele(ds, FTAG); dsl_dataset_rele(origin, FTAG); dsl_dir_rele(pdd, FTAG); } int dmu_objset_clone(const char *clone, const char *origin) { dmu_objset_clone_arg_t doca; doca.doca_clone = clone; doca.doca_origin = origin; doca.doca_cred = CRED(); return (dsl_sync_task(clone, dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 6, ZFS_SPACE_CHECK_NORMAL)); } static int dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg) { int error = 0; uint64_t object = 0; while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { error = dmu_object_remap_indirects(os, object, last_removed_txg); /* * If the ZPL removed the object before we managed to dnode_hold * it, we would get an ENOENT. If the ZPL declares its intent * to remove the object (dnode_free) before we manage to * dnode_hold it, we would get an EEXIST. In either case, we * want to continue remapping the other objects in the objset; * in all other cases, we want to break early. 
*/ if (error != 0 && error != ENOENT && error != EEXIST) { break; } } if (error == ESRCH) { error = 0; } return (error); } int dmu_objset_remap_indirects(const char *fsname) { int error = 0; objset_t *os = NULL; uint64_t last_removed_txg; uint64_t remap_start_txg; dsl_dir_t *dd; error = dmu_objset_hold(fsname, FTAG, &os); if (error != 0) { return (error); } dd = dmu_objset_ds(os)->ds_dir; if (!spa_feature_is_enabled(dmu_objset_spa(os), SPA_FEATURE_OBSOLETE_COUNTS)) { dmu_objset_rele(os, FTAG); return (SET_ERROR(ENOTSUP)); } if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) { dmu_objset_rele(os, FTAG); return (SET_ERROR(EINVAL)); } /* * If there has not been a removal, we're done. */ last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os)); if (last_removed_txg == -1ULL) { dmu_objset_rele(os, FTAG); return (0); } /* * If we have remapped since the last removal, we're done. */ if (dsl_dir_is_zapified(dd)) { uint64_t last_remap_txg; if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)), dd->dd_object, DD_FIELD_LAST_REMAP_TXG, sizeof (last_remap_txg), 1, &last_remap_txg) == 0 && last_remap_txg > last_removed_txg) { dmu_objset_rele(os, FTAG); return (0); } } dsl_dataset_long_hold(dmu_objset_ds(os), FTAG); dsl_pool_rele(dmu_objset_pool(os), FTAG); remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os)); error = dmu_objset_remap_indirects_impl(os, last_removed_txg); if (error == 0) { /* * We update the last_remap_txg to be the start txg so that * we can guarantee that every block older than last_remap_txg * that can be remapped has been remapped. */ error = dsl_dir_update_last_remap_txg(dd, remap_start_txg); } dsl_dataset_long_rele(dmu_objset_ds(os), FTAG); dsl_dataset_rele(dmu_objset_ds(os), FTAG); return (error); } int dmu_objset_snapshot_one(const char *fsname, const char *snapname) { int err; char *longsnap = kmem_asprintf("%s@%s", fsname, snapname); nvlist_t *snaps = fnvlist_alloc(); fnvlist_add_boolean(snaps, longsnap); strfree(longsnap); err = dsl_dataset_snapshot(snaps, NULL, NULL); fnvlist_free(snaps); return (err); } static void dmu_objset_upgrade_task_cb(void *data) { objset_t *os = data; mutex_enter(&os->os_upgrade_lock); os->os_upgrade_status = EINTR; if (!os->os_upgrade_exit) { mutex_exit(&os->os_upgrade_lock); os->os_upgrade_status = os->os_upgrade_cb(os); mutex_enter(&os->os_upgrade_lock); } os->os_upgrade_exit = B_TRUE; os->os_upgrade_id = 0; mutex_exit(&os->os_upgrade_lock); dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag); } static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb) { if (os->os_upgrade_id != 0) return; ASSERT(dsl_pool_config_held(dmu_objset_pool(os))); dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag); mutex_enter(&os->os_upgrade_lock); if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) { os->os_upgrade_exit = B_FALSE; os->os_upgrade_cb = cb; os->os_upgrade_id = taskq_dispatch( os->os_spa->spa_upgrade_taskq, dmu_objset_upgrade_task_cb, os, TQ_SLEEP); if (os->os_upgrade_id == TASKQID_INVALID) { dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag); os->os_upgrade_status = ENOMEM; } } mutex_exit(&os->os_upgrade_lock); } static void dmu_objset_upgrade_stop(objset_t *os) { mutex_enter(&os->os_upgrade_lock); os->os_upgrade_exit = B_TRUE; if (os->os_upgrade_id != 0) { taskqid_t id = os->os_upgrade_id; os->os_upgrade_id = 0; mutex_exit(&os->os_upgrade_lock); if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) { dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag); } txg_wait_synced(os->os_spa->spa_dsl_pool, 0); } else { 
mutex_exit(&os->os_upgrade_lock); } } static void dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx) { dnode_t *dn; while ((dn = multilist_sublist_head(list)) != NULL) { ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); ASSERT(dn->dn_dbuf->db_data_pending); /* * Initialize dn_zio outside dnode_sync() because the * meta-dnode needs to set it outside dnode_sync(). */ dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio; ASSERT(dn->dn_zio); ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS); multilist_sublist_remove(list, dn); /* * If we are not doing user accounting (os_synced_dnodes == NULL) * we are done with this dnode for this txg. Unset dn_dirty_txg * if later txgs aren't dirtying it so that future holders do * not get a stale value. Otherwise, we will do this in * userquota_updates_task() when processing has completely * finished for this txg. */ multilist_t *newlist = dn->dn_objset->os_synced_dnodes; if (newlist != NULL) { (void) dnode_add_ref(dn, newlist); multilist_insert(newlist, dn); } else { mutex_enter(&dn->dn_mtx); if (dn->dn_dirty_txg == tx->tx_txg) dn->dn_dirty_txg = 0; mutex_exit(&dn->dn_mtx); } dnode_sync(dn, tx); } } /* ARGSUSED */ static void dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) { blkptr_t *bp = zio->io_bp; objset_t *os = arg; dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; uint64_t fill = 0; ASSERT(!BP_IS_EMBEDDED(bp)); ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET); ASSERT0(BP_GET_LEVEL(bp)); /* * Update rootbp fill count: it should be the number of objects * allocated in the object set (not counting the "special" * objects that are stored in the objset_phys_t -- the meta * dnode and user/group/project accounting objects). */ for (int i = 0; i < dnp->dn_nblkptr; i++) fill += BP_GET_FILL(&dnp->dn_blkptr[i]); BP_SET_FILL(bp, fill); if (os->os_dsl_dataset != NULL) rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG); *os->os_rootbp = *bp; if (os->os_dsl_dataset != NULL) rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); } /* ARGSUSED */ static void dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg) { blkptr_t *bp = zio->io_bp; blkptr_t *bp_orig = &zio->io_bp_orig; objset_t *os = arg; if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { ASSERT(BP_EQUAL(bp, bp_orig)); } else { dsl_dataset_t *ds = os->os_dsl_dataset; dmu_tx_t *tx = os->os_synctx; (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); dsl_dataset_block_born(ds, bp, tx); } kmem_free(bp, sizeof (*bp)); } typedef struct sync_dnodes_arg { multilist_t *sda_list; int sda_sublist_idx; multilist_t *sda_newlist; dmu_tx_t *sda_tx; } sync_dnodes_arg_t; static void sync_dnodes_task(void *arg) { sync_dnodes_arg_t *sda = arg; multilist_sublist_t *ms = multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx); dmu_objset_sync_dnodes(ms, sda->sda_tx); multilist_sublist_unlock(ms); kmem_free(sda, sizeof (*sda)); } /* called from dsl */ void dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx) { int txgoff; zbookmark_phys_t zb; zio_prop_t zp; zio_t *zio; list_t *list; dbuf_dirty_record_t *dr; blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP); *blkptr_copy = *os->os_rootbp; dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg); ASSERT(dmu_tx_is_syncing(tx)); /* XXX the write_done callback should really give us the tx... */ os->os_synctx = tx; if (os->os_dsl_dataset == NULL) { /* * This is the MOS. If we have upgraded, * spa_max_replication() could change, so reset * os_copies here.
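 * (Only the MOS has no associated dsl_dataset, which is why the
 * os_dsl_dataset == NULL test above is sufficient to identify it.)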
*/ os->os_copies = spa_max_replication(os->os_spa); } /* * Create the root block IO */ SET_BOOKMARK(&zb, os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : DMU_META_OBJSET, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); arc_release(os->os_phys_buf, &os->os_phys_buf); dmu_write_policy(os, NULL, 0, 0, &zp); /* * If we are either claiming the ZIL or doing a raw receive, write * out the os_phys_buf raw. Neither of these actions will affect the * MAC at this point. */ if (os->os_raw_receive || os->os_next_write_raw[tx->tx_txg & TXG_MASK]) { ASSERT(os->os_encrypted); arc_convert_to_raw(os->os_phys_buf, os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER, DMU_OT_OBJSET, NULL, NULL, NULL); } zio = arc_write(pio, os->os_spa, tx->tx_txg, blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); /* * Sync special dnodes - the parent IO for the sync is the root block */ DMU_META_DNODE(os)->dn_zio = zio; dnode_sync(DMU_META_DNODE(os), tx); os->os_phys->os_flags = os->os_flags; if (DMU_USERUSED_DNODE(os) && DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) { DMU_USERUSED_DNODE(os)->dn_zio = zio; dnode_sync(DMU_USERUSED_DNODE(os), tx); DMU_GROUPUSED_DNODE(os)->dn_zio = zio; dnode_sync(DMU_GROUPUSED_DNODE(os), tx); } if (DMU_PROJECTUSED_DNODE(os) && DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) { DMU_PROJECTUSED_DNODE(os)->dn_zio = zio; dnode_sync(DMU_PROJECTUSED_DNODE(os), tx); } txgoff = tx->tx_txg & TXG_MASK; if (dmu_objset_userused_enabled(os) && (!os->os_encrypted || !dmu_objset_is_receiving(os))) { /* * We must create the list here because it uses the * dn_dirty_link[] of this txg. But it may already * exist because we call dsl_dataset_sync() twice per txg. */ if (os->os_synced_dnodes == NULL) { os->os_synced_dnodes = multilist_create(sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[txgoff]), dnode_multilist_index_func); } else { ASSERT3U(os->os_synced_dnodes->ml_offset, ==, offsetof(dnode_t, dn_dirty_link[txgoff])); } } for (int i = 0; i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) { sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP); sda->sda_list = os->os_dirty_dnodes[txgoff]; sda->sda_sublist_idx = i; sda->sda_tx = tx; (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq, sync_dnodes_task, sda, 0); /* callback frees sda */ } taskq_wait(dmu_objset_pool(os)->dp_sync_taskq); list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff]; while ((dr = list_head(list)) != NULL) { ASSERT0(dr->dr_dbuf->db_level); list_remove(list, dr); if (dr->dr_zio) zio_nowait(dr->dr_zio); } /* Enable dnode backfill if enough objects have been freed. */ if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) { os->os_rescan_dnodes = B_TRUE; os->os_freed_dnodes = 0; } /* * Free intent log blocks up to this tx.
*/ zil_sync(os->os_zil, tx); os->os_phys->os_zil_header = os->os_zil_header; zio_nowait(zio); } boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg) { return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK])); } static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES]; void dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb) { used_cbs[ost] = cb; } boolean_t dmu_objset_userused_enabled(objset_t *os) { return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE && used_cbs[os->os_phys->os_type] != NULL && DMU_USERUSED_DNODE(os) != NULL); } boolean_t dmu_objset_userobjused_enabled(objset_t *os) { return (dmu_objset_userused_enabled(os) && spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING)); } boolean_t dmu_objset_projectquota_enabled(objset_t *os) { return (used_cbs[os->os_phys->os_type] != NULL && DMU_PROJECTUSED_DNODE(os) != NULL && spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA)); } typedef struct userquota_node { /* must be the first field, see userquota_update_cache() */ char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN]; int64_t uqn_delta; avl_node_t uqn_node; } userquota_node_t; typedef struct userquota_cache { avl_tree_t uqc_user_deltas; avl_tree_t uqc_group_deltas; avl_tree_t uqc_project_deltas; } userquota_cache_t; static int userquota_compare(const void *l, const void *r) { const userquota_node_t *luqn = l; const userquota_node_t *ruqn = r; int rv; /* * NB: can only access uqn_id because userquota_update_cache() doesn't * pass in an entire userquota_node_t. */ rv = strcmp(luqn->uqn_id, ruqn->uqn_id); return (AVL_ISIGN(rv)); } static void do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx) { void *cookie; userquota_node_t *uqn; ASSERT(dmu_tx_is_syncing(tx)); cookie = NULL; while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas, &cookie)) != NULL) { /* * os_userused_lock protects against concurrent calls to * zap_increment_int(). It's needed because zap_increment_int() * is not thread-safe (i.e. not atomic). */ mutex_enter(&os->os_userused_lock); VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT, uqn->uqn_id, uqn->uqn_delta, tx)); mutex_exit(&os->os_userused_lock); kmem_free(uqn, sizeof (*uqn)); } avl_destroy(&cache->uqc_user_deltas); cookie = NULL; while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas, &cookie)) != NULL) { mutex_enter(&os->os_userused_lock); VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT, uqn->uqn_id, uqn->uqn_delta, tx)); mutex_exit(&os->os_userused_lock); kmem_free(uqn, sizeof (*uqn)); } avl_destroy(&cache->uqc_group_deltas); if (dmu_objset_projectquota_enabled(os)) { cookie = NULL; while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas, &cookie)) != NULL) { mutex_enter(&os->os_userused_lock); VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT, uqn->uqn_id, uqn->uqn_delta, tx)); mutex_exit(&os->os_userused_lock); kmem_free(uqn, sizeof (*uqn)); } avl_destroy(&cache->uqc_project_deltas); } } static void userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta) { userquota_node_t *uqn; avl_index_t idx; ASSERT(strlen(id) < sizeof (uqn->uqn_id)); /* * Use id directly for searching because uqn_id is the first field of * userquota_node_t and fields after uqn_id won't be accessed in * avl_find().
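 *
 * Illustrative sketch (not part of the original source): because uqn_id
 * sits at offset 0, a bare id string can stand in for a whole node when
 * probing the tree, e.g. with a hypothetical id "1f4":
 *
 *	userquota_node_t *hit = avl_find(avl, (const void *)"1f4", &idx);
 *
 * userquota_compare() then dereferences nothing but ->uqn_id, i.e. the
 * string itself, so the missing fields of the fake "node" are never
 * touched.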
*/ uqn = avl_find(avl, (const void *)id, &idx); if (uqn == NULL) { uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP); strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id)); avl_insert(avl, uqn, idx); } uqn->uqn_delta += delta; } static void do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used, uint64_t flags, uint64_t user, uint64_t group, uint64_t project, boolean_t subtract) { if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) { int64_t delta = DNODE_MIN_SIZE + used; char name[20]; if (subtract) delta = -delta; (void) sprintf(name, "%llx", (longlong_t)user); userquota_update_cache(&cache->uqc_user_deltas, name, delta); (void) sprintf(name, "%llx", (longlong_t)group); userquota_update_cache(&cache->uqc_group_deltas, name, delta); if (dmu_objset_projectquota_enabled(os)) { (void) sprintf(name, "%llx", (longlong_t)project); userquota_update_cache(&cache->uqc_project_deltas, name, delta); } } } static void do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags, uint64_t user, uint64_t group, uint64_t project, boolean_t subtract) { if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) { char name[20 + DMU_OBJACCT_PREFIX_LEN]; int delta = subtract ? -1 : 1; (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", (longlong_t)user); userquota_update_cache(&cache->uqc_user_deltas, name, delta); (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", (longlong_t)group); userquota_update_cache(&cache->uqc_group_deltas, name, delta); if (dmu_objset_projectquota_enabled(os)) { (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", (longlong_t)project); userquota_update_cache(&cache->uqc_project_deltas, name, delta); } } } typedef struct userquota_updates_arg { objset_t *uua_os; int uua_sublist_idx; dmu_tx_t *uua_tx; } userquota_updates_arg_t; static void userquota_updates_task(void *arg) { userquota_updates_arg_t *uua = arg; objset_t *os = uua->uua_os; dmu_tx_t *tx = uua->uua_tx; dnode_t *dn; userquota_cache_t cache = { { 0 } }; multilist_sublist_t *list = multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx); ASSERT(multilist_sublist_head(list) == NULL || dmu_objset_userused_enabled(os)); avl_create(&cache.uqc_user_deltas, userquota_compare, sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); avl_create(&cache.uqc_group_deltas, userquota_compare, sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); if (dmu_objset_projectquota_enabled(os)) avl_create(&cache.uqc_project_deltas, userquota_compare, sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); while ((dn = multilist_sublist_head(list)) != NULL) { int flags; ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object)); ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE || dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED); flags = dn->dn_id_flags; ASSERT(flags); if (flags & DN_ID_OLD_EXIST) { do_userquota_update(os, &cache, dn->dn_oldused, dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid, dn->dn_oldprojid, B_TRUE); do_userobjquota_update(os, &cache, dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid, dn->dn_oldprojid, B_TRUE); } if (flags & DN_ID_NEW_EXIST) { do_userquota_update(os, &cache, DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid, dn->dn_newprojid, B_FALSE); do_userobjquota_update(os, &cache, dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid, dn->dn_newprojid, B_FALSE); } mutex_enter(&dn->dn_mtx); dn->dn_oldused = 0; dn->dn_oldflags = 0; if (dn->dn_id_flags & DN_ID_NEW_EXIST) { dn->dn_olduid = dn->dn_newuid; dn->dn_oldgid = dn->dn_newgid; 
dn->dn_oldprojid = dn->dn_newprojid; dn->dn_id_flags |= DN_ID_OLD_EXIST; if (dn->dn_bonuslen == 0) dn->dn_id_flags |= DN_ID_CHKED_SPILL; else dn->dn_id_flags |= DN_ID_CHKED_BONUS; } dn->dn_id_flags &= ~(DN_ID_NEW_EXIST); if (dn->dn_dirty_txg == spa_syncing_txg(os->os_spa)) dn->dn_dirty_txg = 0; mutex_exit(&dn->dn_mtx); multilist_sublist_remove(list, dn); dnode_rele(dn, os->os_synced_dnodes); } do_userquota_cacheflush(os, &cache, tx); multilist_sublist_unlock(list); kmem_free(uua, sizeof (*uua)); } void dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx) { if (!dmu_objset_userused_enabled(os)) return; /* * If this is a raw receive just return and handle accounting * later when we have the keys loaded. We also don't do user * accounting during claiming since the datasets are not owned * for the duration of claiming and this txg should only be * used for recovery. */ if (os->os_encrypted && dmu_objset_is_receiving(os)) return; if (tx->tx_txg <= os->os_spa->spa_claim_max_txg) return; /* Allocate the user/group/project used objects if necessary. */ if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) { VERIFY0(zap_create_claim(os, DMU_USERUSED_OBJECT, DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); VERIFY0(zap_create_claim(os, DMU_GROUPUSED_OBJECT, DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); } if (dmu_objset_projectquota_enabled(os) && DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) { VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT, DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); } for (int i = 0; i < multilist_get_num_sublists(os->os_synced_dnodes); i++) { userquota_updates_arg_t *uua = kmem_alloc(sizeof (*uua), KM_SLEEP); uua->uua_os = os; uua->uua_sublist_idx = i; uua->uua_tx = tx; /* note: caller does taskq_wait() */ (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq, userquota_updates_task, uua, 0); /* callback frees uua */ } } /* * Returns a pointer to data to find uid/gid from * * If a dirty record for transaction group that is syncing can't * be found then NULL is returned. In the NULL case it is assumed * the uid/gid aren't changing. */ static void * dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx) { dbuf_dirty_record_t *dr, **drp; void *data; if (db->db_dirtycnt == 0) return (db->db.db_data); /* Nothing is changing */ for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) if (dr->dr_txg == tx->tx_txg) break; if (dr == NULL) { data = NULL; } else { dnode_t *dn; DB_DNODE_ENTER(dr->dr_dbuf); dn = DB_DNODE(dr->dr_dbuf); if (dn->dn_bonuslen == 0 && dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID) data = dr->dt.dl.dr_data->b_data; else data = dr->dt.dl.dr_data; DB_DNODE_EXIT(dr->dr_dbuf); } return (data); } void dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx) { objset_t *os = dn->dn_objset; void *data = NULL; dmu_buf_impl_t *db = NULL; uint64_t *user = NULL; uint64_t *group = NULL; uint64_t *project = NULL; int flags = dn->dn_id_flags; int error; boolean_t have_spill = B_FALSE; if (!dmu_objset_userused_enabled(dn->dn_objset)) return; /* * Raw receives introduce a problem with user accounting. Raw * receives cannot update the user accounting info because the * user ids and the sizes are encrypted. To guarantee that we * never end up with bad user accounting, we simply disable it * during raw receives. We also disable this for normal receives * so that an incremental raw receive may be done on top of an * existing non-raw receive. 
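 * (The accounting is expected to be rebuilt later, once the keys are
 * loaded; cf. the os_upgrade machinery and dmu_objset_id_quota_upgrade()
 * elsewhere in this file.)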
*/ if (os->os_encrypted && dmu_objset_is_receiving(os)) return; if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST| DN_ID_CHKED_SPILL))) return; if (before && dn->dn_bonuslen != 0) data = DN_BONUS(dn->dn_phys); else if (!before && dn->dn_bonuslen != 0) { if (dn->dn_bonus) { db = dn->dn_bonus; mutex_enter(&db->db_mtx); data = dmu_objset_userquota_find_data(db, tx); } else { data = DN_BONUS(dn->dn_phys); } } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) { int rf = 0; if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) rf |= DB_RF_HAVESTRUCT; error = dmu_spill_hold_by_dnode(dn, rf | DB_RF_MUST_SUCCEED, FTAG, (dmu_buf_t **)&db); ASSERT(error == 0); mutex_enter(&db->db_mtx); data = (before) ? db->db.db_data : dmu_objset_userquota_find_data(db, tx); have_spill = B_TRUE; } else { mutex_enter(&dn->dn_mtx); dn->dn_id_flags |= DN_ID_CHKED_BONUS; mutex_exit(&dn->dn_mtx); return; } if (before) { ASSERT(data); user = &dn->dn_olduid; group = &dn->dn_oldgid; project = &dn->dn_oldprojid; } else if (data) { user = &dn->dn_newuid; group = &dn->dn_newgid; project = &dn->dn_newprojid; } /* * Must always call the callback in case the object * type has changed and that type isn't an object type to track */ error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data, user, group, project); /* * Preserve existing uid/gid when the callback can't determine * what the new uid/gid are and the callback returned EEXIST. * The EEXIST error tells us to just use the existing uid/gid. * If we don't know what the old values are then just assign * them to 0, since that is a new file being created. */ if (!before && data == NULL && error == EEXIST) { if (flags & DN_ID_OLD_EXIST) { dn->dn_newuid = dn->dn_olduid; dn->dn_newgid = dn->dn_oldgid; dn->dn_newprojid = dn->dn_oldprojid; } else { dn->dn_newuid = 0; dn->dn_newgid = 0; dn->dn_newprojid = ZFS_DEFAULT_PROJID; } error = 0; } if (db) mutex_exit(&db->db_mtx); mutex_enter(&dn->dn_mtx); if (error == 0 && before) dn->dn_id_flags |= DN_ID_OLD_EXIST; if (error == 0 && !before) dn->dn_id_flags |= DN_ID_NEW_EXIST; if (have_spill) { dn->dn_id_flags |= DN_ID_CHKED_SPILL; } else { dn->dn_id_flags |= DN_ID_CHKED_BONUS; } mutex_exit(&dn->dn_mtx); if (have_spill) dmu_buf_rele((dmu_buf_t *)db, FTAG); } boolean_t dmu_objset_userspace_present(objset_t *os) { return (os->os_phys->os_flags & OBJSET_FLAG_USERACCOUNTING_COMPLETE); } boolean_t dmu_objset_userobjspace_present(objset_t *os) { return (os->os_phys->os_flags & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE); } boolean_t dmu_objset_projectquota_present(objset_t *os) { return (os->os_phys->os_flags & OBJSET_FLAG_PROJECTQUOTA_COMPLETE); } static int dmu_objset_space_upgrade(objset_t *os) { uint64_t obj; int err = 0; /* * We simply need to mark every object dirty, so that it will be * synced out and now accounted. If this is called * concurrently, or if we already did some work before crashing, * that's fine, since we track each object's accounted state * independently. 
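 * (Dirtying the bonus buffer below is enough to pull each dnode through
 * the sync path, where the userquota callbacks recompute that object's
 * accounting.)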
*/ for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) { dmu_tx_t *tx; dmu_buf_t *db; int objerr; mutex_enter(&os->os_upgrade_lock); if (os->os_upgrade_exit) err = SET_ERROR(EINTR); mutex_exit(&os->os_upgrade_lock); if (err != 0) return (err); if (issig(JUSTLOOKING) && issig(FORREAL)) return (SET_ERROR(EINTR)); objerr = dmu_bonus_hold(os, obj, FTAG, &db); if (objerr != 0) continue; tx = dmu_tx_create(os); dmu_tx_hold_bonus(tx, obj); objerr = dmu_tx_assign(tx, TXG_WAIT); if (objerr != 0) { dmu_buf_rele(db, FTAG); dmu_tx_abort(tx); continue; } dmu_buf_will_dirty(db, tx); dmu_buf_rele(db, FTAG); dmu_tx_commit(tx); } return (0); } int dmu_objset_userspace_upgrade(objset_t *os) { int err = 0; if (dmu_objset_userspace_present(os)) return (0); if (dmu_objset_is_snapshot(os)) return (SET_ERROR(EINVAL)); if (!dmu_objset_userused_enabled(os)) return (SET_ERROR(ENOTSUP)); err = dmu_objset_space_upgrade(os); if (err) return (err); os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; txg_wait_synced(dmu_objset_pool(os), 0); return (0); } static int dmu_objset_id_quota_upgrade_cb(objset_t *os) { int err = 0; if (dmu_objset_userobjspace_present(os) && dmu_objset_projectquota_present(os)) return (0); if (dmu_objset_is_snapshot(os)) return (SET_ERROR(EINVAL)); if (!dmu_objset_userobjused_enabled(os)) return (SET_ERROR(ENOTSUP)); if (!dmu_objset_projectquota_enabled(os) && dmu_objset_userobjspace_present(os)) return (SET_ERROR(ENOTSUP)); dmu_objset_ds(os)->ds_feature_activation[ SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE; if (dmu_objset_projectquota_enabled(os)) dmu_objset_ds(os)->ds_feature_activation[ SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE; err = dmu_objset_space_upgrade(os); if (err) return (err); os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; if (dmu_objset_projectquota_enabled(os)) os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE; txg_wait_synced(dmu_objset_pool(os), 0); return (0); } void dmu_objset_id_quota_upgrade(objset_t *os) { dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb); } boolean_t dmu_objset_userobjspace_upgradable(objset_t *os) { return (dmu_objset_type(os) == DMU_OST_ZFS && !dmu_objset_is_snapshot(os) && dmu_objset_userobjused_enabled(os) && !dmu_objset_userobjspace_present(os)); } boolean_t dmu_objset_projectquota_upgradable(objset_t *os) { return (dmu_objset_type(os) == DMU_OST_ZFS && !dmu_objset_is_snapshot(os) && dmu_objset_projectquota_enabled(os) && !dmu_objset_projectquota_present(os)); } void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp) { dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp, usedobjsp, availobjsp); } uint64_t dmu_objset_fsid_guid(objset_t *os) { return (dsl_dataset_fsid_guid(os->os_dsl_dataset)); } void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat) { stat->dds_type = os->os_phys->os_type; if (os->os_dsl_dataset) dsl_dataset_fast_stat(os->os_dsl_dataset, stat); } void dmu_objset_stats(objset_t *os, nvlist_t *nv) { ASSERT(os->os_dsl_dataset || os->os_phys->os_type == DMU_OST_META); if (os->os_dsl_dataset != NULL) dsl_dataset_stats(os->os_dsl_dataset, nv); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE, os->os_phys->os_type); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING, dmu_objset_userspace_present(os)); } int dmu_objset_is_snapshot(objset_t *os) { if (os->os_dsl_dataset != NULL) return (os->os_dsl_dataset->ds_is_snapshot); else return (B_FALSE); } int dmu_snapshot_realname(objset_t *os, char *name, char *real, int 
maxlen, boolean_t *conflict) { dsl_dataset_t *ds = os->os_dsl_dataset; uint64_t ignored; if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) return (SET_ERROR(ENOENT)); return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_NORMALIZE, real, maxlen, conflict)); } int dmu_snapshot_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp, boolean_t *case_conflict) { dsl_dataset_t *ds = os->os_dsl_dataset; zap_cursor_t cursor; zap_attribute_t attr; ASSERT(dsl_pool_config_held(dmu_objset_pool(os))); if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) return (SET_ERROR(ENOENT)); zap_cursor_init_serialized(&cursor, ds->ds_dir->dd_pool->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (SET_ERROR(ENOENT)); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (SET_ERROR(ENAMETOOLONG)); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; if (case_conflict) *case_conflict = attr.za_normalization_conflict; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); } int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value) { return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value)); } int dmu_dir_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp) { dsl_dir_t *dd = os->os_dsl_dataset->ds_dir; zap_cursor_t cursor; zap_attribute_t attr; /* there is no next dir on a snapshot! */ if (os->os_dsl_dataset->ds_object != dsl_dir_phys(dd)->dd_head_dataset_obj) return (SET_ERROR(ENOENT)); zap_cursor_init_serialized(&cursor, dd->dd_pool->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (SET_ERROR(ENOENT)); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (SET_ERROR(ENAMETOOLONG)); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); } typedef struct dmu_objset_find_ctx { taskq_t *dc_tq; dsl_pool_t *dc_dp; uint64_t dc_ddobj; char *dc_ddname; /* last component of ddobj's name */ int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *); void *dc_arg; int dc_flags; kmutex_t *dc_error_lock; int *dc_error; } dmu_objset_find_ctx_t; static void dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp) { dsl_pool_t *dp = dcp->dc_dp; dsl_dir_t *dd; dsl_dataset_t *ds; zap_cursor_t zc; zap_attribute_t *attr; uint64_t thisobj; int err = 0; /* don't process if there already was an error */ if (*dcp->dc_error != 0) goto out; /* * Note: passing the name (dc_ddname) here is optional, but it * improves performance because we don't need to call * zap_value_search() to determine the name. */ err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd); if (err != 0) goto out; /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ if (dd->dd_myname[0] == '$') { dsl_dir_rele(dd, FTAG); goto out; } thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj; attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); /* * Iterate over all children. 
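 * Each child found gets its own freshly allocated dmu_objset_find_ctx_t:
 * it is either dispatched to the taskq (the parallel case) or walked
 * synchronously when dc_tq is NULL, and dmu_objset_find_dp_impl() frees
 * it once it has been processed.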
*/ if (dcp->dc_flags & DS_FIND_CHILDREN) { for (zap_cursor_init(&zc, dp->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); dmu_objset_find_ctx_t *child_dcp = kmem_alloc(sizeof (*child_dcp), KM_SLEEP); *child_dcp = *dcp; child_dcp->dc_ddobj = attr->za_first_integer; child_dcp->dc_ddname = spa_strdup(attr->za_name); if (dcp->dc_tq != NULL) (void) taskq_dispatch(dcp->dc_tq, dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP); else dmu_objset_find_dp_impl(child_dcp); } zap_cursor_fini(&zc); } /* * Iterate over all snapshots. */ if (dcp->dc_flags & DS_FIND_SNAPSHOTS) { dsl_dataset_t *ds; err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); if (err == 0) { uint64_t snapobj; snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj; dsl_dataset_rele(ds, FTAG); for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); err = dsl_dataset_hold_obj(dp, attr->za_first_integer, FTAG, &ds); if (err != 0) break; err = dcp->dc_func(dp, ds, dcp->dc_arg); dsl_dataset_rele(ds, FTAG); if (err != 0) break; } zap_cursor_fini(&zc); } } kmem_free(attr, sizeof (zap_attribute_t)); if (err != 0) { dsl_dir_rele(dd, FTAG); goto out; } /* * Apply to self. */ err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); /* * Note: we hold the dir while calling dsl_dataset_hold_obj() so * that the dir will remain cached, and we won't have to re-instantiate * it (which could be expensive due to finding its name via * zap_value_search()). */ dsl_dir_rele(dd, FTAG); if (err != 0) goto out; err = dcp->dc_func(dp, ds, dcp->dc_arg); dsl_dataset_rele(ds, FTAG); out: if (err != 0) { mutex_enter(dcp->dc_error_lock); /* only keep first error */ if (*dcp->dc_error == 0) *dcp->dc_error = err; mutex_exit(dcp->dc_error_lock); } if (dcp->dc_ddname != NULL) spa_strfree(dcp->dc_ddname); kmem_free(dcp, sizeof (*dcp)); } static void dmu_objset_find_dp_cb(void *arg) { dmu_objset_find_ctx_t *dcp = arg; dsl_pool_t *dp = dcp->dc_dp; /* * We need to get a pool_config_lock here, as there are several * assert(pool_config_held) down the stack. Getting a lock via * dsl_pool_config_enter is risky, as it might be stalled by a * pending writer. This would deadlock, as the write lock can * only be granted when our parent thread gives up the lock. * The _prio interface gives us priority over a pending writer. */ dsl_pool_config_enter_prio(dp, FTAG); dmu_objset_find_dp_impl(dcp); dsl_pool_config_exit(dp, FTAG); } /* * Find objsets under and including ddobj, call func(ds) on each. * The order for the enumeration is completely undefined. * func is called with dsl_pool_config held.
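 *
 * A minimal, hypothetical caller might look like the sketch below
 * (count_cb and the surrounding names are illustrative only):
 *
 *	static int
 *	count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t n = 0;
 *	int err = dmu_objset_find_dp(dp, ddobj, count_cb, &n,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 *
 * A nonzero return from the callback aborts the walk; only the first
 * such error is propagated back to the caller.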
*/ int dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj, int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags) { int error = 0; taskq_t *tq = NULL; int ntasks; dmu_objset_find_ctx_t *dcp; kmutex_t err_lock; mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL); dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP); dcp->dc_tq = NULL; dcp->dc_dp = dp; dcp->dc_ddobj = ddobj; dcp->dc_ddname = NULL; dcp->dc_func = func; dcp->dc_arg = arg; dcp->dc_flags = flags; dcp->dc_error_lock = &err_lock; dcp->dc_error = &error; if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) { /* * In case a write lock is held we can't make use of * parallelism, as down the stack of the worker threads * the lock is asserted via dsl_pool_config_held. * In case of a read lock this is solved by getting a read * lock in each worker thread, which isn't possible in case * of a writer lock. So we fall back to the synchronous path * here. * In the future it might be possible to get some magic into * dsl_pool_config_held in a way that it returns true for * the worker threads so that a single lock held from this * thread suffices. For now, stay single threaded. */ dmu_objset_find_dp_impl(dcp); mutex_destroy(&err_lock); return (error); } ntasks = dmu_find_threads; if (ntasks == 0) ntasks = vdev_count_leaves(dp->dp_spa) * 4; tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks, INT_MAX, 0); if (tq == NULL) { kmem_free(dcp, sizeof (*dcp)); mutex_destroy(&err_lock); return (SET_ERROR(ENOMEM)); } dcp->dc_tq = tq; /* dcp will be freed by task */ (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP); /* * PORTING: this code relies on the property of taskq_wait to wait * until no more tasks are queued and no more tasks are active. As * we always queue new tasks from within other tasks, task_wait * reliably waits for the full recursion to finish, even though we * enqueue new tasks after taskq_wait has been called. * On platforms other than illumos, taskq_wait may not have this * property. */ taskq_wait(tq); taskq_destroy(tq); mutex_destroy(&err_lock); return (error); } /* * Find all objsets under name, and for each, call 'func(child_name, arg)'. * The dp_config_rwlock must not be held when this is called, and it * will not be held when the callback is called. * Therefore this function should only be used when the pool is not changing * (e.g. in syncing context), or the callback can deal with the possible races. */ static int dmu_objset_find_impl(spa_t *spa, const char *name, int func(const char *, void *), void *arg, int flags) { dsl_dir_t *dd; dsl_pool_t *dp = spa_get_dsl(spa); dsl_dataset_t *ds; zap_cursor_t zc; zap_attribute_t *attr; char *child; uint64_t thisobj; int err; dsl_pool_config_enter(dp, FTAG); err = dsl_dir_hold(dp, name, FTAG, &dd, NULL); if (err != 0) { dsl_pool_config_exit(dp, FTAG); return (err); } /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ if (dd->dd_myname[0] == '$') { dsl_dir_rele(dd, FTAG); dsl_pool_config_exit(dp, FTAG); return (0); } thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj; attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); /* * Iterate over all children. 
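 * (Note that dp_config_rwlock is dropped and re-taken around each
 * recursive call and each callback invocation below; this is the source
 * of the races that the comment above this function warns about.)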
*/ if (flags & DS_FIND_CHILDREN) { for (zap_cursor_init(&zc, dp->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); child = kmem_asprintf("%s/%s", name, attr->za_name); dsl_pool_config_exit(dp, FTAG); err = dmu_objset_find_impl(spa, child, func, arg, flags); dsl_pool_config_enter(dp, FTAG); strfree(child); if (err != 0) break; } zap_cursor_fini(&zc); if (err != 0) { dsl_dir_rele(dd, FTAG); dsl_pool_config_exit(dp, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); return (err); } } /* * Iterate over all snapshots. */ if (flags & DS_FIND_SNAPSHOTS) { err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); if (err == 0) { uint64_t snapobj; snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj; dsl_dataset_rele(ds, FTAG); for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); child = kmem_asprintf("%s@%s", name, attr->za_name); dsl_pool_config_exit(dp, FTAG); err = func(child, arg); dsl_pool_config_enter(dp, FTAG); strfree(child); if (err != 0) break; } zap_cursor_fini(&zc); } } dsl_dir_rele(dd, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); dsl_pool_config_exit(dp, FTAG); if (err != 0) return (err); /* Apply to self. */ return (func(name, arg)); } /* * See comment above dmu_objset_find_impl(). */ int dmu_objset_find(char *name, int func(const char *, void *), void *arg, int flags) { spa_t *spa; int error; error = spa_open(name, &spa, FTAG); if (error != 0) return (error); error = dmu_objset_find_impl(spa, name, func, arg, flags); spa_close(spa, FTAG); return (error); } boolean_t dmu_objset_incompatible_encryption_version(objset_t *os) { return (dsl_dir_incompatible_encryption_version( os->os_dsl_dataset->ds_dir)); } void dmu_objset_set_user(objset_t *os, void *user_ptr) { ASSERT(MUTEX_HELD(&os->os_user_ptr_lock)); os->os_user_ptr = user_ptr; } void * dmu_objset_get_user(objset_t *os) { ASSERT(MUTEX_HELD(&os->os_user_ptr_lock)); return (os->os_user_ptr); } /* * Determine name of filesystem, given name of snapshot. * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */ int dmu_fsname(const char *snapname, char *buf) { char *atp = strchr(snapname, '@'); if (atp == NULL) return (SET_ERROR(EINVAL)); if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); (void) strlcpy(buf, snapname, atp - snapname + 1); return (0); } /* * Call when we think we're going to write/free space in open context to track * the amount of dirty data in the open txg, which is also the amount * of memory that can not be evicted until this txg syncs. 
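 * (The requested space is first inflated via spa_get_worst_case_asize(),
 * so the amount charged to the dsl_dir reflects the worst-case allocated
 * size, not just the logical byte count.)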
*/ void dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx) { dsl_dataset_t *ds = os->os_dsl_dataset; int64_t aspace = spa_get_worst_case_asize(os->os_spa, space); if (ds != NULL) { dsl_dir_willuse_space(ds->ds_dir, aspace, tx); dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx); } } #if defined(_KERNEL) EXPORT_SYMBOL(dmu_objset_zil); EXPORT_SYMBOL(dmu_objset_pool); EXPORT_SYMBOL(dmu_objset_ds); EXPORT_SYMBOL(dmu_objset_type); EXPORT_SYMBOL(dmu_objset_name); EXPORT_SYMBOL(dmu_objset_hold); EXPORT_SYMBOL(dmu_objset_hold_flags); EXPORT_SYMBOL(dmu_objset_own); EXPORT_SYMBOL(dmu_objset_rele); EXPORT_SYMBOL(dmu_objset_rele_flags); EXPORT_SYMBOL(dmu_objset_disown); EXPORT_SYMBOL(dmu_objset_from_ds); EXPORT_SYMBOL(dmu_objset_create); EXPORT_SYMBOL(dmu_objset_clone); EXPORT_SYMBOL(dmu_objset_stats); EXPORT_SYMBOL(dmu_objset_fast_stat); EXPORT_SYMBOL(dmu_objset_spa); EXPORT_SYMBOL(dmu_objset_space); EXPORT_SYMBOL(dmu_objset_fsid_guid); EXPORT_SYMBOL(dmu_objset_find); EXPORT_SYMBOL(dmu_objset_byteswap); EXPORT_SYMBOL(dmu_objset_evict_dbufs); EXPORT_SYMBOL(dmu_objset_snap_cmtime); EXPORT_SYMBOL(dmu_objset_dnodesize); EXPORT_SYMBOL(dmu_objset_sync); EXPORT_SYMBOL(dmu_objset_is_dirty); EXPORT_SYMBOL(dmu_objset_create_impl_dnstats); EXPORT_SYMBOL(dmu_objset_create_impl); EXPORT_SYMBOL(dmu_objset_open_impl); EXPORT_SYMBOL(dmu_objset_evict); EXPORT_SYMBOL(dmu_objset_register_type); EXPORT_SYMBOL(dmu_objset_do_userquota_updates); EXPORT_SYMBOL(dmu_objset_userquota_get_ids); EXPORT_SYMBOL(dmu_objset_userused_enabled); EXPORT_SYMBOL(dmu_objset_userspace_upgrade); EXPORT_SYMBOL(dmu_objset_userspace_present); EXPORT_SYMBOL(dmu_objset_userobjused_enabled); EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable); EXPORT_SYMBOL(dmu_objset_userobjspace_present); EXPORT_SYMBOL(dmu_objset_projectquota_enabled); EXPORT_SYMBOL(dmu_objset_projectquota_present); EXPORT_SYMBOL(dmu_objset_projectquota_upgradable); EXPORT_SYMBOL(dmu_objset_id_quota_upgrade); #endif diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c index 4ac6f2f17f14..e05b5ad821f0 100644 --- a/module/zfs/dmu_recv.c +++ b/module/zfs/dmu_recv.c @@ -1,2906 +1,2930 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2011 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2011, 2015 by Delphix. All rights reserved. * Copyright (c) 2014, Joyent, Inc. All rights reserved. * Copyright 2014 HybridCluster. All rights reserved. * Copyright 2016 RackTop Systems. * Copyright (c) 2016 Actifio, Inc. All rights reserved. + * Copyright (c) 2018, loli10K . All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int zfs_recv_queue_length = SPA_MAXBLOCKSIZE; static char *dmu_recv_tag = "dmu_recv_tag"; const char *recv_clone_name = "%recv"; static void byteswap_record(dmu_replay_record_t *drr); typedef struct dmu_recv_begin_arg { const char *drba_origin; dmu_recv_cookie_t *drba_cookie; cred_t *drba_cred; dsl_crypto_params_t *drba_dcp; uint64_t drba_snapobj; } dmu_recv_begin_arg_t; static int recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds, uint64_t fromguid, uint64_t featureflags) { uint64_t val; + uint64_t children; int error; dsl_pool_t *dp = ds->ds_dir->dd_pool; boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0; boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0; boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0; /* temporary clone name must not exist */ error = zap_lookup(dp->dp_meta_objset, dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name, 8, 1, &val); if (error != ENOENT) return (error == 0 ? EBUSY : error); /* new snapshot name must not exist */ error = zap_lookup(dp->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap, 8, 1, &val); if (error != ENOENT) return (error == 0 ? EEXIST : error); + /* must not have children if receiving a ZVOL */ + error = zap_count(dp->dp_meta_objset, + dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children); + if (error != 0) + return (error); + if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS && + children > 0) + return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); + /* * Check snapshot limit before receiving. We'll recheck again at the * end, but might as well abort before receiving if we're already over * the limit. * * Note that we do not check the file system limit with * dsl_dir_fscount_check because the temporary %clones don't count * against that limit. */ error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred); if (error != 0) return (error); if (fromguid != 0) { dsl_dataset_t *snap; uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; /* Can't perform a raw receive on top of a non-raw receive */ if (!encrypted && raw) return (SET_ERROR(EINVAL)); /* Encryption is incompatible with embedded data */ if (encrypted && embed) return (SET_ERROR(EINVAL)); /* Find snapshot in this dir that matches fromguid. */ while (obj != 0) { error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap); if (error != 0) return (SET_ERROR(ENODEV)); if (snap->ds_dir != ds->ds_dir) { dsl_dataset_rele(snap, FTAG); return (SET_ERROR(ENODEV)); } if (dsl_dataset_phys(snap)->ds_guid == fromguid) break; obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; dsl_dataset_rele(snap, FTAG); } if (obj == 0) return (SET_ERROR(ENODEV)); if (drba->drba_cookie->drc_force) { drba->drba_snapobj = obj; } else { /* * If we are not forcing, there must be no * changes since fromsnap. */ if (dsl_dataset_modified_since_snap(ds, snap)) { dsl_dataset_rele(snap, FTAG); return (SET_ERROR(ETXTBSY)); } drba->drba_snapobj = ds->ds_prev->ds_object; } dsl_dataset_rele(snap, FTAG); } else { /* if full, then must be forced */ if (!drba->drba_cookie->drc_force) return (SET_ERROR(EEXIST)); /* * We don't support using zfs recv -F to blow away * encrypted filesystems. 
This would require the * dsl dir to point to the old encryption key and * the new one at the same time during the receive. */ if ((!encrypted && raw) || encrypted) return (SET_ERROR(EINVAL)); /* * Perform the same encryption checks we would if * we were creating a new dataset from scratch. */ if (!raw) { boolean_t will_encrypt; error = dmu_objset_create_crypt_check( ds->ds_dir->dd_parent, drba->drba_dcp, &will_encrypt); if (error != 0) return (error); if (will_encrypt && embed) return (SET_ERROR(EINVAL)); } drba->drba_snapobj = 0; } return (0); } static int dmu_recv_begin_check(void *arg, dmu_tx_t *tx) { dmu_recv_begin_arg_t *drba = arg; dsl_pool_t *dp = dmu_tx_pool(tx); struct drr_begin *drrb = drba->drba_cookie->drc_drrb; uint64_t fromguid = drrb->drr_fromguid; int flags = drrb->drr_flags; ds_hold_flags_t dsflags = 0; int error; uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); dsl_dataset_t *ds; const char *tofs = drba->drba_cookie->drc_tofs; /* already checked */ ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING)); if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_COMPOUNDSTREAM || drrb->drr_type >= DMU_OST_NUMTYPES || ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL)) return (SET_ERROR(EINVAL)); /* Verify pool version supports SA if SA_SPILL feature set */ if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) && spa_version(dp->dp_spa) < SPA_VERSION_SA) return (SET_ERROR(ENOTSUP)); if (drba->drba_cookie->drc_resumable && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET)) return (SET_ERROR(ENOTSUP)); /* * The receiving code doesn't know how to translate a WRITE_EMBEDDED * record to a plain WRITE record, so the pool must have the * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED * records. Same with WRITE_EMBEDDED records that use LZ4 compression. */ if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) return (SET_ERROR(ENOTSUP)); if ((featureflags & DMU_BACKUP_FEATURE_LZ4) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) return (SET_ERROR(ENOTSUP)); /* * The receiving code doesn't know how to translate large blocks * to smaller ones, so the pool must have the LARGE_BLOCKS * feature enabled if the stream has LARGE_BLOCKS. Same with * large dnodes. 
*/ if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS)) return (SET_ERROR(ENOTSUP)); if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE)) return (SET_ERROR(ENOTSUP)); if (featureflags & DMU_BACKUP_FEATURE_RAW) { /* raw receives require the encryption feature */ if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) return (SET_ERROR(ENOTSUP)); /* embedded data is incompatible with encryption and raw recv */ if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) return (SET_ERROR(EINVAL)); } else { dsflags |= DS_HOLD_FLAG_DECRYPT; } error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds); if (error == 0) { /* target fs already exists; recv into temp clone */ /* Can't recv a clone into an existing fs */ if (flags & DRR_FLAG_CLONE || drba->drba_origin) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } error = recv_begin_check_existing_impl(drba, ds, fromguid, featureflags); dsl_dataset_rele_flags(ds, dsflags, FTAG); } else if (error == ENOENT) { /* target fs does not exist; must be a full backup or clone */ char buf[ZFS_MAX_DATASET_NAME_LEN]; + objset_t *os; /* * If it's a non-clone incremental, we are missing the * target fs, so fail the recv. */ if (fromguid != 0 && !(flags & DRR_FLAG_CLONE || drba->drba_origin)) return (SET_ERROR(ENOENT)); /* * If we're receiving a full send as a clone, and it doesn't * contain all the necessary free records and freeobject * records, reject it. */ if (fromguid == 0 && drba->drba_origin && !(flags & DRR_FLAG_FREERECORDS)) return (SET_ERROR(EINVAL)); /* Open the parent of tofs */ ASSERT3U(strlen(tofs), <, sizeof (buf)); (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1); error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds); if (error != 0) return (error); if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 && drba->drba_origin == NULL) { boolean_t will_encrypt; /* * Check that we aren't breaking any encryption rules * and that we have all the parameters we need to * create an encrypted dataset if necessary. If we are * making an encrypted dataset the stream can't have * embedded data. */ error = dmu_objset_create_crypt_check(ds->ds_dir, drba->drba_dcp, &will_encrypt); if (error != 0) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (error); } if (will_encrypt && (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } } /* * Check filesystem and snapshot limits before receiving. We'll * recheck snapshot limits again at the end (we create the * filesystems and increment those counts during begin_sync). */ error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred); if (error != 0) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (error); } error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred); if (error != 0) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (error); } + /* can't recv below anything but filesystems (eg. 
no ZVOLs) */ + error = dmu_objset_from_ds(ds, &os); + if (error != 0) { + dsl_dataset_rele_flags(ds, dsflags, FTAG); + return (error); + } + if (dmu_objset_type(os) != DMU_OST_ZFS) { + dsl_dataset_rele_flags(ds, dsflags, FTAG); + return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); + } + if (drba->drba_origin != NULL) { dsl_dataset_t *origin; error = dsl_dataset_hold_flags(dp, drba->drba_origin, dsflags, FTAG, &origin); if (error != 0) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (error); } if (!origin->ds_is_snapshot) { dsl_dataset_rele_flags(origin, dsflags, FTAG); dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } if (dsl_dataset_phys(origin)->ds_guid != fromguid && fromguid != 0) { dsl_dataset_rele_flags(origin, dsflags, FTAG); dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(ENODEV)); } if (origin->ds_dir->dd_crypto_obj != 0 && (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) { dsl_dataset_rele_flags(origin, dsflags, FTAG); dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } dsl_dataset_rele_flags(origin, dsflags, FTAG); } + dsl_dataset_rele_flags(ds, dsflags, FTAG); error = 0; } return (error); } static void dmu_recv_begin_sync(void *arg, dmu_tx_t *tx) { dmu_recv_begin_arg_t *drba = arg; dsl_pool_t *dp = dmu_tx_pool(tx); objset_t *mos = dp->dp_meta_objset; struct drr_begin *drrb = drba->drba_cookie->drc_drrb; const char *tofs = drba->drba_cookie->drc_tofs; uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); dsl_dataset_t *ds, *newds; objset_t *os; uint64_t dsobj; ds_hold_flags_t dsflags = 0; int error; uint64_t crflags = 0; dsl_crypto_params_t dummy_dcp = { 0 }; dsl_crypto_params_t *dcp = drba->drba_dcp; if (drrb->drr_flags & DRR_FLAG_CI_DATA) crflags |= DS_FLAG_CI_DATASET; if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0) dsflags |= DS_HOLD_FLAG_DECRYPT; /* * Raw, non-incremental recvs always use a dummy dcp with * the raw cmd set. Raw incremental recvs do not use a dcp * since the encryption parameters are already set in stone. */ if (dcp == NULL && drba->drba_snapobj == 0 && drba->drba_origin == NULL) { ASSERT3P(dcp, ==, NULL); dcp = &dummy_dcp; if (featureflags & DMU_BACKUP_FEATURE_RAW) dcp->cp_cmd = DCP_CMD_RAW_RECV; } error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds); if (error == 0) { /* create temporary clone */ dsl_dataset_t *snap = NULL; if (drba->drba_snapobj != 0) { VERIFY0(dsl_dataset_hold_obj(dp, drba->drba_snapobj, FTAG, &snap)); ASSERT3P(dcp, ==, NULL); } dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name, snap, crflags, drba->drba_cred, dcp, tx); if (drba->drba_snapobj != 0) dsl_dataset_rele(snap, FTAG); dsl_dataset_rele_flags(ds, dsflags, FTAG); } else { dsl_dir_t *dd; const char *tail; dsl_dataset_t *origin = NULL; VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail)); if (drba->drba_origin != NULL) { VERIFY0(dsl_dataset_hold(dp, drba->drba_origin, FTAG, &origin)); ASSERT3P(dcp, ==, NULL); } /* Create new dataset. 
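 * (or a clone of drba_origin, when one was supplied; in that case origin
 * holds the snapshot being cloned).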
*/ dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1, origin, crflags, drba->drba_cred, dcp, tx); if (origin != NULL) dsl_dataset_rele(origin, FTAG); dsl_dir_rele(dd, FTAG); drba->drba_cookie->drc_newfs = B_TRUE; } VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds)); VERIFY0(dmu_objset_from_ds(newds, &os)); if (drba->drba_cookie->drc_resumable) { dsl_dataset_zapify(newds, tx); if (drrb->drr_fromguid != 0) { VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID, 8, 1, &drrb->drr_fromguid, tx)); } VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID, 8, 1, &drrb->drr_toguid, tx)); VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME, 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx)); uint64_t one = 1; uint64_t zero = 0; VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT, 8, 1, &one, tx)); VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET, 8, 1, &zero, tx)); VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES, 8, 1, &zero, tx)); if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) { VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK, 8, 1, &one, tx)); } if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) { VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK, 8, 1, &one, tx)); } if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) { VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK, 8, 1, &one, tx)); } if (featureflags & DMU_BACKUP_FEATURE_RAW) { VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK, 8, 1, &one, tx)); } } /* * Usually the os->os_encrypted value is tied to the presence of a * DSL Crypto Key object in the dd. However, that will not be received * until dmu_recv_stream(), so we set the value manually for now. */ if (featureflags & DMU_BACKUP_FEATURE_RAW) { os->os_encrypted = B_TRUE; drba->drba_cookie->drc_raw = B_TRUE; } dmu_buf_will_dirty(newds->ds_dbuf, tx); dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT; /* * If we actually created a non-clone, we need to create the objset * in our new dataset. If this is a raw send we postpone this until * dmu_recv_stream() so that we can allocate the metadnode with the * properties from the DRR_BEGIN payload. */ rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG); if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) && (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) { (void) dmu_objset_create_impl(dp->dp_spa, newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx); } rrw_exit(&newds->ds_bp_rwlock, FTAG); drba->drba_cookie->drc_ds = newds; spa_history_log_internal_ds(newds, "receive", tx, ""); } static int dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx) { dmu_recv_begin_arg_t *drba = arg; dsl_pool_t *dp = dmu_tx_pool(tx); struct drr_begin *drrb = drba->drba_cookie->drc_drrb; int error; ds_hold_flags_t dsflags = 0; uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); dsl_dataset_t *ds; const char *tofs = drba->drba_cookie->drc_tofs; /* already checked */ ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING); if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_COMPOUNDSTREAM || drrb->drr_type >= DMU_OST_NUMTYPES) return (SET_ERROR(EINVAL)); /* Verify pool version supports SA if SA_SPILL feature set */ if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) && spa_version(dp->dp_spa) < SPA_VERSION_SA) return (SET_ERROR(ENOTSUP)); /* * The receiving code doesn't know how to translate a WRITE_EMBEDDED * record to a plain WRITE record, so the pool must have the * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED * records. 
Same with WRITE_EMBEDDED records that use LZ4 compression. */ if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) return (SET_ERROR(ENOTSUP)); if ((featureflags & DMU_BACKUP_FEATURE_LZ4) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) return (SET_ERROR(ENOTSUP)); /* * The receiving code doesn't know how to translate large blocks * to smaller ones, so the pool must have the LARGE_BLOCKS * feature enabled if the stream has LARGE_BLOCKS. Same with * large dnodes. */ if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS)) return (SET_ERROR(ENOTSUP)); if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) && !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE)) return (SET_ERROR(ENOTSUP)); /* 6 extra bytes for /%recv */ char recvname[ZFS_MAX_DATASET_NAME_LEN + 6]; (void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs, recv_clone_name); if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0) dsflags |= DS_HOLD_FLAG_DECRYPT; if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) { /* %recv does not exist; continue in tofs */ error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds); if (error != 0) return (error); } /* check that ds is marked inconsistent */ if (!DS_IS_INCONSISTENT(ds)) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } /* check that there is resuming data, and that the toguid matches */ if (!dsl_dataset_is_zapified(ds)) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } uint64_t val; error = zap_lookup(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val); if (error != 0 || drrb->drr_toguid != val) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } /* * Check if the receive is still running. If so, it will be owned. * Note that nothing else can own the dataset (e.g. after the receive * fails) because it will be marked inconsistent. */ if (dsl_dataset_has_owner(ds)) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EBUSY)); } /* There should not be any snapshots of this fs yet. */ if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } /* * Note: resume point will be checked when we process the first WRITE * record. 
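 * (dmu_recv_begin_sync() stores that cursor as the DS_FIELD_RESUME_OBJECT
 * and DS_FIELD_RESUME_OFFSET ZAP entries; those are the values the first
 * WRITE record will be checked against.)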
*/ /* check that the origin matches */ val = 0; (void) zap_lookup(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val); if (drrb->drr_fromguid != val) { dsl_dataset_rele_flags(ds, dsflags, FTAG); return (SET_ERROR(EINVAL)); } dsl_dataset_rele_flags(ds, dsflags, FTAG); return (0); } static void dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx) { dmu_recv_begin_arg_t *drba = arg; dsl_pool_t *dp = dmu_tx_pool(tx); const char *tofs = drba->drba_cookie->drc_tofs; struct drr_begin *drrb = drba->drba_cookie->drc_drrb; uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); dsl_dataset_t *ds; objset_t *os; ds_hold_flags_t dsflags = 0; uint64_t dsobj; /* 6 extra bytes for /%recv */ char recvname[ZFS_MAX_DATASET_NAME_LEN + 6]; (void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs, recv_clone_name); if (featureflags & DMU_BACKUP_FEATURE_RAW) { drba->drba_cookie->drc_raw = B_TRUE; } else { dsflags |= DS_HOLD_FLAG_DECRYPT; } if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) { /* %recv does not exist; continue in tofs */ VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds)); drba->drba_cookie->drc_newfs = B_TRUE; } /* clear the inconsistent flag so that we can own it */ ASSERT(DS_IS_INCONSISTENT(ds)); dmu_buf_will_dirty(ds->ds_dbuf, tx); dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; dsobj = ds->ds_object; dsl_dataset_rele_flags(ds, dsflags, FTAG); VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds)); VERIFY0(dmu_objset_from_ds(ds, &os)); dmu_buf_will_dirty(ds->ds_dbuf, tx); dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT; rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) || drba->drba_cookie->drc_raw); rrw_exit(&ds->ds_bp_rwlock, FTAG); drba->drba_cookie->drc_ds = ds; spa_history_log_internal_ds(ds, "resume receive", tx, ""); } /* * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() * succeeds; otherwise we will leak the holds on the datasets. */ int dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin, boolean_t force, boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc) { dmu_recv_begin_arg_t drba = { 0 }; bzero(drc, sizeof (dmu_recv_cookie_t)); drc->drc_drr_begin = drr_begin; drc->drc_drrb = &drr_begin->drr_u.drr_begin; drc->drc_tosnap = tosnap; drc->drc_tofs = tofs; drc->drc_force = force; drc->drc_resumable = resumable; drc->drc_cred = CRED(); drc->drc_clone = (origin != NULL); if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { drc->drc_byteswap = B_TRUE; (void) fletcher_4_incremental_byteswap(drr_begin, sizeof (dmu_replay_record_t), &drc->drc_cksum); byteswap_record(drr_begin); } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) { (void) fletcher_4_incremental_native(drr_begin, sizeof (dmu_replay_record_t), &drc->drc_cksum); } else { return (SET_ERROR(EINVAL)); } drba.drba_origin = origin; drba.drba_cookie = drc; drba.drba_cred = CRED(); if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & DMU_BACKUP_FEATURE_RESUMING) { return (dsl_sync_task(tofs, dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync, &drba, 5, ZFS_SPACE_CHECK_NORMAL)); } else { int err; /* * For non-raw, non-incremental, non-resuming receives the * user can specify encryption parameters on the command line * with "zfs recv -o". For these receives we create a dcp and * pass it to the sync task. 
Creating the dcp will implicitly * remove the encryption params from the localprops nvlist, * which avoids errors when trying to set these normally * read-only properties. Any other kind of receive that * attempts to set these properties will fail as a result. */ if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & DMU_BACKUP_FEATURE_RAW) == 0 && origin == NULL && drc->drc_drrb->drr_fromguid == 0) { err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, localprops, hidden_args, &drba.drba_dcp); if (err != 0) return (err); } err = dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync, &drba, 5, ZFS_SPACE_CHECK_NORMAL); dsl_crypto_params_free(drba.drba_dcp, !!err); return (err); } } struct receive_record_arg { dmu_replay_record_t header; void *payload; /* Pointer to a buffer containing the payload */ /* * If the record is a write, pointer to the arc_buf_t containing the * payload. */ arc_buf_t *arc_buf; int payload_size; uint64_t bytes_read; /* bytes read from stream when record created */ boolean_t eos_marker; /* Marks the end of the stream */ bqueue_node_t node; }; struct receive_writer_arg { objset_t *os; boolean_t byteswap; bqueue_t q; /* * These three args are used to signal to the main thread that we're * done. */ kmutex_t mutex; kcondvar_t cv; boolean_t done; int err; /* A map from guid to dataset to help handle dedup'd streams. */ avl_tree_t *guid_to_ds_map; boolean_t resumable; boolean_t raw; uint64_t last_object; uint64_t last_offset; uint64_t max_object; /* highest object ID referenced in stream */ uint64_t bytes_read; /* bytes read when current record created */ /* Encryption parameters for the last received DRR_OBJECT_RANGE */ boolean_t or_crypt_params_present; uint64_t or_firstobj; uint64_t or_numslots; uint8_t or_salt[ZIO_DATA_SALT_LEN]; uint8_t or_iv[ZIO_DATA_IV_LEN]; uint8_t or_mac[ZIO_DATA_MAC_LEN]; boolean_t or_byteorder; }; struct objlist { list_t list; /* List of struct receive_objnode. */ /* * Last object looked up. Used to assert that objects are being looked * up in ascending order. */ uint64_t last_lookup; }; struct receive_objnode { list_node_t node; uint64_t object; }; struct receive_arg { objset_t *os; vnode_t *vp; /* The vnode to read the stream from */ uint64_t voff; /* The current offset in the stream */ uint64_t bytes_read; /* * A record that has had its payload read in, but hasn't yet been handed * off to the worker thread. */ struct receive_record_arg *rrd; /* A record that has had its header read in, but not its payload. */ struct receive_record_arg *next_rrd; zio_cksum_t cksum; zio_cksum_t prev_cksum; int err; boolean_t byteswap; boolean_t raw; uint64_t featureflags; /* Sorted list of objects not to issue prefetches for. 
*/ struct objlist ignore_objlist; }; typedef struct guid_map_entry { uint64_t guid; boolean_t raw; dsl_dataset_t *gme_ds; avl_node_t avlnode; } guid_map_entry_t; static int guid_compare(const void *arg1, const void *arg2) { const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1; const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2; return (AVL_CMP(gmep1->guid, gmep2->guid)); } static void free_guid_map_onexit(void *arg) { avl_tree_t *ca = arg; void *cookie = NULL; guid_map_entry_t *gmep; while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) { ds_hold_flags_t dsflags = DS_HOLD_FLAG_DECRYPT; if (gmep->raw) { gmep->gme_ds->ds_objset->os_raw_receive = B_FALSE; dsflags &= ~DS_HOLD_FLAG_DECRYPT; } dsl_dataset_disown(gmep->gme_ds, dsflags, gmep); kmem_free(gmep, sizeof (guid_map_entry_t)); } avl_destroy(ca); kmem_free(ca, sizeof (avl_tree_t)); } static int receive_read(struct receive_arg *ra, int len, void *buf) { int done = 0; /* * The code doesn't rely on this (lengths being multiples of 8). See * comment in dump_bytes. */ ASSERT(len % 8 == 0 || (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0); while (done < len) { ssize_t resid; ra->err = vn_rdwr(UIO_READ, ra->vp, (char *)buf + done, len - done, ra->voff, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid); if (resid == len - done) { /* * Note: ECKSUM indicates that the receive * was interrupted and can potentially be resumed. */ ra->err = SET_ERROR(ECKSUM); } ra->voff += len - done - resid; done = len - resid; if (ra->err != 0) return (ra->err); } ra->bytes_read += len; ASSERT3U(done, ==, len); return (0); } noinline static void byteswap_record(dmu_replay_record_t *drr) { #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) drr->drr_type = BSWAP_32(drr->drr_type); drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); switch (drr->drr_type) { case DRR_BEGIN: DO64(drr_begin.drr_magic); DO64(drr_begin.drr_versioninfo); DO64(drr_begin.drr_creation_time); DO32(drr_begin.drr_type); DO32(drr_begin.drr_flags); DO64(drr_begin.drr_toguid); DO64(drr_begin.drr_fromguid); break; case DRR_OBJECT: DO64(drr_object.drr_object); DO32(drr_object.drr_type); DO32(drr_object.drr_bonustype); DO32(drr_object.drr_blksz); DO32(drr_object.drr_bonuslen); DO32(drr_object.drr_raw_bonuslen); DO64(drr_object.drr_toguid); DO64(drr_object.drr_maxblkid); break; case DRR_FREEOBJECTS: DO64(drr_freeobjects.drr_firstobj); DO64(drr_freeobjects.drr_numobjs); DO64(drr_freeobjects.drr_toguid); break; case DRR_WRITE: DO64(drr_write.drr_object); DO32(drr_write.drr_type); DO64(drr_write.drr_offset); DO64(drr_write.drr_logical_size); DO64(drr_write.drr_toguid); ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); DO64(drr_write.drr_key.ddk_prop); DO64(drr_write.drr_compressed_size); break; case DRR_WRITE_BYREF: DO64(drr_write_byref.drr_object); DO64(drr_write_byref.drr_offset); DO64(drr_write_byref.drr_length); DO64(drr_write_byref.drr_toguid); DO64(drr_write_byref.drr_refguid); DO64(drr_write_byref.drr_refobject); DO64(drr_write_byref.drr_refoffset); ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref. 
drr_key.ddk_cksum); DO64(drr_write_byref.drr_key.ddk_prop); break; case DRR_WRITE_EMBEDDED: DO64(drr_write_embedded.drr_object); DO64(drr_write_embedded.drr_offset); DO64(drr_write_embedded.drr_length); DO64(drr_write_embedded.drr_toguid); DO32(drr_write_embedded.drr_lsize); DO32(drr_write_embedded.drr_psize); break; case DRR_FREE: DO64(drr_free.drr_object); DO64(drr_free.drr_offset); DO64(drr_free.drr_length); DO64(drr_free.drr_toguid); break; case DRR_SPILL: DO64(drr_spill.drr_object); DO64(drr_spill.drr_length); DO64(drr_spill.drr_toguid); DO64(drr_spill.drr_compressed_size); DO32(drr_spill.drr_type); break; case DRR_OBJECT_RANGE: DO64(drr_object_range.drr_firstobj); DO64(drr_object_range.drr_numslots); DO64(drr_object_range.drr_toguid); break; case DRR_END: DO64(drr_end.drr_toguid); ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum); break; default: break; } if (drr->drr_type != DRR_BEGIN) { ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum); } #undef DO64 #undef DO32 } static inline uint8_t deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size) { if (bonus_type == DMU_OT_SA) { return (1); } else { return (1 + ((DN_OLD_MAX_BONUSLEN - MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT)); } } static void save_resume_state(struct receive_writer_arg *rwa, uint64_t object, uint64_t offset, dmu_tx_t *tx) { int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; if (!rwa->resumable) return; /* * We use ds_resume_bytes[] != 0 to indicate that we need to * update this on disk, so it must not be 0. */ ASSERT(rwa->bytes_read != 0); /* * We only resume from write records, which have a valid * (non-meta-dnode) object number. */ ASSERT(object != 0); /* * For resuming to work correctly, we must receive records in order, * sorted by object,offset. This is checked by the callers, but * assert it here for good measure. */ ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]); ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] || offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]); ASSERT3U(rwa->bytes_read, >=, rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]); rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object; rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset; rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read; } noinline static int receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, void *data) { dmu_object_info_t doi; dmu_tx_t *tx; uint64_t object; int err; uint8_t dn_slots = drro->drr_dn_slots != 0 ? drro->drr_dn_slots : DNODE_MIN_SLOTS; if (drro->drr_type == DMU_OT_NONE || !DMU_OT_IS_VALID(drro->drr_type) || !DMU_OT_IS_VALID(drro->drr_bonustype) || drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || drro->drr_blksz < SPA_MINBLOCKSIZE || drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || drro->drr_bonuslen > DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) || dn_slots > (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) { return (SET_ERROR(EINVAL)); } if (rwa->raw) { /* * We should have received a DRR_OBJECT_RANGE record * containing this block and stored it in rwa. 
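deduce_nblkptr() above is plain arithmetic on the legacy dnode layout: a 512-byte dnode holds at most DN_OLD_MAX_BONUSLEN (320) bytes of bonus, and each 128-byte blkptr_t (SPA_BLKPTRSHIFT == 7) left unused by the bonus buffer adds one block pointer beyond the first. A worked userland version of the same formula, with the constants assumed from the ZFS headers:

#include <stdio.h>

#define DN_OLD_MAX_BONUSLEN 320  /* assumed: max bonus in a 512-byte dnode */
#define SPA_BLKPTRSHIFT 7        /* assumed: sizeof (blkptr_t) == 128 */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned
deduce_nblkptr(int is_sa, unsigned bonus_size)
{
	if (is_sa)
		return (1);
	return (1 + ((DN_OLD_MAX_BONUSLEN -
	    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
}

int main(void)
{
	printf("%u\n", deduce_nblkptr(0, 0));    /* 3: empty bonus area */
	printf("%u\n", deduce_nblkptr(0, 64));   /* 3 */
	printf("%u\n", deduce_nblkptr(0, 192));  /* 2 */
	printf("%u\n", deduce_nblkptr(0, 320));  /* 1: bonus fills the space */
	return (0);
}

So a full 320-byte bonus leaves a single blkptr, while an empty bonus area yields the familiar maximum of three.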
*/ if (drro->drr_object < rwa->or_firstobj || drro->drr_object >= rwa->or_firstobj + rwa->or_numslots || drro->drr_raw_bonuslen < drro->drr_bonuslen || drro->drr_indblkshift > SPA_MAXBLOCKSHIFT || drro->drr_nlevels > DN_MAX_LEVELS || drro->drr_nblkptr > DN_MAX_NBLKPTR || DN_SLOTS_TO_BONUSLEN(dn_slots) < drro->drr_raw_bonuslen) return (SET_ERROR(EINVAL)); } else { if (drro->drr_flags != 0 || drro->drr_raw_bonuslen != 0 || drro->drr_indblkshift != 0 || drro->drr_nlevels != 0 || drro->drr_nblkptr != 0) return (SET_ERROR(EINVAL)); } err = dmu_object_info(rwa->os, drro->drr_object, &doi); if (err != 0 && err != ENOENT && err != EEXIST) return (SET_ERROR(EINVAL)); if (drro->drr_object > rwa->max_object) rwa->max_object = drro->drr_object; /* * If we are losing blkptrs or changing the block size this must * be a new file instance. We must clear out the previous file * contents before we can change this type of metadata in the dnode. * Raw receives will also check that the indirect structure of the * dnode hasn't changed. */ if (err == 0) { uint32_t indblksz = drro->drr_indblkshift ? 1ULL << drro->drr_indblkshift : 0; int nblkptr = deduce_nblkptr(drro->drr_bonustype, drro->drr_bonuslen); object = drro->drr_object; /* nblkptr will be bounded by the bonus size and type */ if (rwa->raw && nblkptr != drro->drr_nblkptr) return (SET_ERROR(EINVAL)); if (drro->drr_blksz != doi.doi_data_block_size || nblkptr < doi.doi_nblkptr || dn_slots != doi.doi_dnodesize >> DNODE_SHIFT || (rwa->raw && (indblksz != doi.doi_metadata_block_size || drro->drr_nlevels < doi.doi_indirection))) { err = dmu_free_long_range(rwa->os, drro->drr_object, 0, DMU_OBJECT_END); if (err != 0) return (SET_ERROR(EINVAL)); } /* * The dmu does not currently support decreasing nlevels * on an object. For non-raw sends, this does not matter * and the new object can just use the previous one's nlevels. * For raw sends, however, the structure of the received dnode * (including nlevels) must match that of the send side. * Therefore, instead of using dmu_object_reclaim(), we must * free the object completely and call dmu_object_claim_dnsize() * instead. */ if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) || dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) { err = dmu_free_long_object(rwa->os, drro->drr_object); if (err != 0) return (SET_ERROR(EINVAL)); txg_wait_synced(dmu_objset_pool(rwa->os), 0); object = DMU_NEW_OBJECT; } } else if (err == EEXIST) { /* * The object requested is currently an interior slot of a * multi-slot dnode. This will be resolved when the next txg * is synced out, since the send stream will have told us * to free this slot when we freed the associated dnode * earlier in the stream. */ txg_wait_synced(dmu_objset_pool(rwa->os), 0); object = drro->drr_object; } else { /* object is free and we are about to allocate a new one */ object = DMU_NEW_OBJECT; } /* * If this is a multi-slot dnode there is a chance that this * object will expand into a slot that is already used by * another object from the previous snapshot. We must free * these objects before we attempt to allocate the new dnode. 
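The dn_slots checks above convert between slot counts and on-disk dnode sizes with DNODE_SHIFT. A tiny sketch of that conversion, assuming DNODE_SHIFT == 9 (a 512-byte minimum dnode) as in the ZFS headers:

#include <stdio.h>

#define DNODE_SHIFT 9    /* assumed: log2 of the 512-byte minimum dnode */

int main(void)
{
	for (unsigned dn_slots = 1; dn_slots <= 4; dn_slots <<= 1)
		printf("%u slot(s) -> %u-byte dnode\n",
		    dn_slots, dn_slots << DNODE_SHIFT);
	/* the inverse, as applied to dmu_object_info_t's doi_dnodesize */
	printf("1024-byte dnode -> %u slots\n", 1024 >> DNODE_SHIFT);
	return (0);
}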
*/ if (dn_slots > 1) { boolean_t need_sync = B_FALSE; for (uint64_t slot = drro->drr_object + 1; slot < drro->drr_object + dn_slots; slot++) { dmu_object_info_t slot_doi; err = dmu_object_info(rwa->os, slot, &slot_doi); if (err == ENOENT || err == EEXIST) continue; else if (err != 0) return (err); err = dmu_free_long_object(rwa->os, slot); if (err != 0) return (err); need_sync = B_TRUE; } if (need_sync) txg_wait_synced(dmu_objset_pool(rwa->os), 0); } tx = dmu_tx_create(rwa->os); dmu_tx_hold_bonus(tx, object); dmu_tx_hold_write(tx, object, 0, 0); err = dmu_tx_assign(tx, TXG_WAIT); if (err != 0) { dmu_tx_abort(tx); return (err); } if (object == DMU_NEW_OBJECT) { /* currently free, want to be allocated */ err = dmu_object_claim_dnsize(rwa->os, drro->drr_object, drro->drr_type, drro->drr_blksz, drro->drr_bonustype, drro->drr_bonuslen, dn_slots << DNODE_SHIFT, tx); } else if (drro->drr_type != doi.doi_type || drro->drr_blksz != doi.doi_data_block_size || drro->drr_bonustype != doi.doi_bonus_type || drro->drr_bonuslen != doi.doi_bonus_size) { /* currently allocated, but with different properties */ err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object, drro->drr_type, drro->drr_blksz, drro->drr_bonustype, drro->drr_bonuslen, dn_slots << DNODE_SHIFT, tx); } if (err != 0) { dmu_tx_commit(tx); return (SET_ERROR(EINVAL)); } if (rwa->or_crypt_params_present) { /* * Set the crypt params for the buffer associated with this * range of dnodes. This causes the blkptr_t to have the * same crypt params (byteorder, salt, iv, mac) as on the * sending side. * * Since we are committing this tx now, it is possible for * the dnode block to end up on-disk with the incorrect MAC, * if subsequent objects in this block are received in a * different txg. However, since the dataset is marked as * inconsistent, no code paths will do a non-raw read (or * decrypt the block / verify the MAC). The receive code and * scrub code can safely do raw reads and verify the * checksum. They don't need to verify the MAC. */ dmu_buf_t *db = NULL; uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE; err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os), offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT); if (err != 0) { dmu_tx_commit(tx); return (SET_ERROR(EINVAL)); } dmu_buf_set_crypt_params(db, rwa->or_byteorder, rwa->or_salt, rwa->or_iv, rwa->or_mac, tx); dmu_buf_rele(db, FTAG); rwa->or_crypt_params_present = B_FALSE; } dmu_object_set_checksum(rwa->os, drro->drr_object, drro->drr_checksumtype, tx); dmu_object_set_compress(rwa->os, drro->drr_object, drro->drr_compress, tx); /* handle more restrictive dnode structuring for raw recvs */ if (rwa->raw) { /* * Set the indirect block shift and nlevels. This will not fail * because we ensured all of the blocks were free earlier if * this is a new object. 
*/ VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object, drro->drr_blksz, drro->drr_indblkshift, tx)); VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object, drro->drr_nlevels, tx)); VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object, drro->drr_maxblkid, tx)); } if (data != NULL) { dmu_buf_t *db; dnode_t *dn; uint32_t flags = DMU_READ_NO_PREFETCH; if (rwa->raw) flags |= DMU_READ_NO_DECRYPT; VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn)); VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags)); dmu_buf_will_dirty(db, tx); ASSERT3U(db->db_size, >=, drro->drr_bonuslen); bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro)); /* * Raw bonus buffers have their byteorder determined by the * DRR_OBJECT_RANGE record. */ if (rwa->byteswap && !rwa->raw) { dmu_object_byteswap_t byteswap = DMU_OT_BYTESWAP(drro->drr_bonustype); dmu_ot_byteswap[byteswap].ob_func(db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro)); } dmu_buf_rele(db, FTAG); dnode_rele(dn, FTAG); } dmu_tx_commit(tx); return (0); } /* ARGSUSED */ noinline static int receive_freeobjects(struct receive_writer_arg *rwa, struct drr_freeobjects *drrfo) { uint64_t obj; int next_err = 0; if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) return (SET_ERROR(EINVAL)); for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj; obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0; next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) { dmu_object_info_t doi; int err; err = dmu_object_info(rwa->os, obj, &doi); if (err == ENOENT) continue; else if (err != 0) return (err); err = dmu_free_long_object(rwa->os, obj); if (err != 0) return (err); if (obj > rwa->max_object) rwa->max_object = obj; } if (next_err != ESRCH) return (next_err); return (0); } noinline static int receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw, arc_buf_t *abuf) { int err; dmu_tx_t *tx; dnode_t *dn; if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset || !DMU_OT_IS_VALID(drrw->drr_type)) return (SET_ERROR(EINVAL)); /* * For resuming to work, records must be in increasing order * by (object, offset). */ if (drrw->drr_object < rwa->last_object || (drrw->drr_object == rwa->last_object && drrw->drr_offset < rwa->last_offset)) { return (SET_ERROR(EINVAL)); } rwa->last_object = drrw->drr_object; rwa->last_offset = drrw->drr_offset; if (rwa->last_object > rwa->max_object) rwa->max_object = rwa->last_object; if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0) return (SET_ERROR(EINVAL)); tx = dmu_tx_create(rwa->os); dmu_tx_hold_write(tx, drrw->drr_object, drrw->drr_offset, drrw->drr_logical_size); err = dmu_tx_assign(tx, TXG_WAIT); if (err != 0) { dmu_tx_abort(tx); return (err); } if (rwa->byteswap && !arc_is_encrypted(abuf) && arc_get_compression(abuf) == ZIO_COMPRESS_OFF) { dmu_object_byteswap_t byteswap = DMU_OT_BYTESWAP(drrw->drr_type); dmu_ot_byteswap[byteswap].ob_func(abuf->b_data, DRR_WRITE_PAYLOAD_SIZE(drrw)); } VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn)); err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx); if (err != 0) { dnode_rele(dn, FTAG); dmu_tx_commit(tx); return (err); } dnode_rele(dn, FTAG); /* * Note: If the receive fails, we want the resume stream to start * with the same record that we last successfully received (as opposed * to the next record), so that we can verify that we are * resuming from the correct location. */ save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx); dmu_tx_commit(tx); return (0); } /* * Handle a DRR_WRITE_BYREF record. 
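The resume logic in receive_write() above depends on the stream being sorted by (object, offset). A small self-contained model of that admission check; write_in_order and last_pos are names invented for this sketch:

#include <assert.h>
#include <stdint.h>

struct last_pos { uint64_t object, offset; };

static int
write_in_order(struct last_pos *last, uint64_t object, uint64_t offset)
{
	if (object < last->object ||
	    (object == last->object && offset < last->offset))
		return (0);     /* out of order: rejected with EINVAL above */
	last->object = object;
	last->offset = offset;
	return (1);
}

int main(void)
{
	struct last_pos lp = { 0, 0 };
	assert(write_in_order(&lp, 4, 0));
	assert(write_in_order(&lp, 4, 131072));
	assert(!write_in_order(&lp, 3, 0));     /* object went backwards */
	return (0);
}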
This record is used in dedup'ed * streams to refer to a copy of the data that is already on the * system because it came in earlier in the stream. This function * finds the earlier copy of the data, and uses that copy instead of * data from the stream to fulfill this write. */ static int receive_write_byref(struct receive_writer_arg *rwa, struct drr_write_byref *drrwbr) { dmu_tx_t *tx; int err; guid_map_entry_t gmesrch; guid_map_entry_t *gmep; avl_index_t where; objset_t *ref_os = NULL; int flags = DMU_READ_PREFETCH; dmu_buf_t *dbp; if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset) return (SET_ERROR(EINVAL)); /* * If the GUID of the referenced dataset is different from the * GUID of the target dataset, find the referenced dataset. */ if (drrwbr->drr_toguid != drrwbr->drr_refguid) { gmesrch.guid = drrwbr->drr_refguid; if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch, &where)) == NULL) { return (SET_ERROR(EINVAL)); } if (dmu_objset_from_ds(gmep->gme_ds, &ref_os)) return (SET_ERROR(EINVAL)); } else { ref_os = rwa->os; } if (drrwbr->drr_object > rwa->max_object) rwa->max_object = drrwbr->drr_object; if (rwa->raw) flags |= DMU_READ_NO_DECRYPT; /* may return either a regular db or an encrypted one */ err = dmu_buf_hold(ref_os, drrwbr->drr_refobject, drrwbr->drr_refoffset, FTAG, &dbp, flags); if (err != 0) return (err); tx = dmu_tx_create(rwa->os); dmu_tx_hold_write(tx, drrwbr->drr_object, drrwbr->drr_offset, drrwbr->drr_length); err = dmu_tx_assign(tx, TXG_WAIT); if (err != 0) { dmu_tx_abort(tx); return (err); } if (rwa->raw) { dmu_copy_from_buf(rwa->os, drrwbr->drr_object, drrwbr->drr_offset, dbp, tx); } else { dmu_write(rwa->os, drrwbr->drr_object, drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx); } dmu_buf_rele(dbp, FTAG); /* See comment in receive_write. */ save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx); dmu_tx_commit(tx); return (0); } static int receive_write_embedded(struct receive_writer_arg *rwa, struct drr_write_embedded *drrwe, void *data) { dmu_tx_t *tx; int err; if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset) return (SET_ERROR(EINVAL)); if (drrwe->drr_psize > BPE_PAYLOAD_SIZE) return (SET_ERROR(EINVAL)); if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES) return (SET_ERROR(EINVAL)); if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS) return (SET_ERROR(EINVAL)); if (rwa->raw) return (SET_ERROR(EINVAL)); if (drrwe->drr_object > rwa->max_object) rwa->max_object = drrwe->drr_object; tx = dmu_tx_create(rwa->os); dmu_tx_hold_write(tx, drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length); err = dmu_tx_assign(tx, TXG_WAIT); if (err != 0) { dmu_tx_abort(tx); return (err); } dmu_write_embedded(rwa->os, drrwe->drr_object, drrwe->drr_offset, data, drrwe->drr_etype, drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize, rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx); /* See comment in receive_write.
*/ save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx); dmu_tx_commit(tx); return (0); } static int receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs, arc_buf_t *abuf) { dmu_tx_t *tx; dmu_buf_t *db, *db_spill; int err; uint32_t flags = 0; if (drrs->drr_length < SPA_MINBLOCKSIZE || drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os))) return (SET_ERROR(EINVAL)); if (rwa->raw) { if (!DMU_OT_IS_VALID(drrs->drr_type) || drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS || drrs->drr_compressed_size == 0) return (SET_ERROR(EINVAL)); flags |= DMU_READ_NO_DECRYPT; } if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0) return (SET_ERROR(EINVAL)); if (drrs->drr_object > rwa->max_object) rwa->max_object = drrs->drr_object; VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db)); if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG, &db_spill)) != 0) { dmu_buf_rele(db, FTAG); return (err); } tx = dmu_tx_create(rwa->os); dmu_tx_hold_spill(tx, db->db_object); err = dmu_tx_assign(tx, TXG_WAIT); if (err != 0) { dmu_buf_rele(db, FTAG); dmu_buf_rele(db_spill, FTAG); dmu_tx_abort(tx); return (err); } if (db_spill->db_size < drrs->drr_length) VERIFY(0 == dbuf_spill_set_blksz(db_spill, drrs->drr_length, tx)); if (rwa->byteswap && !arc_is_encrypted(abuf) && arc_get_compression(abuf) == ZIO_COMPRESS_OFF) { dmu_object_byteswap_t byteswap = DMU_OT_BYTESWAP(drrs->drr_type); dmu_ot_byteswap[byteswap].ob_func(abuf->b_data, DRR_SPILL_PAYLOAD_SIZE(drrs)); } dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx); dmu_buf_rele(db, FTAG); dmu_buf_rele(db_spill, FTAG); dmu_tx_commit(tx); return (0); } /* ARGSUSED */ noinline static int receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf) { int err; if (drrf->drr_length != DMU_OBJECT_END && drrf->drr_offset + drrf->drr_length < drrf->drr_offset) return (SET_ERROR(EINVAL)); if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0) return (SET_ERROR(EINVAL)); if (drrf->drr_object > rwa->max_object) rwa->max_object = drrf->drr_object; err = dmu_free_long_range(rwa->os, drrf->drr_object, drrf->drr_offset, drrf->drr_length); return (err); } static int receive_object_range(struct receive_writer_arg *rwa, struct drr_object_range *drror) { /* * By default, we assume this block is in our native format * (ZFS_HOST_BYTEORDER). We then take into account whether * the send stream is byteswapped (rwa->byteswap). Finally, * we need to byteswap again if this particular block was * in non-native format on the send side. */ boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^ !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags); /* * Since dnode block sizes are constant, we should not need to worry * about making sure that the dnode block size is the same on the * sending and receiving sides for the time being. For non-raw sends, * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE * record at all). Raw sends require this record type because the * encryption parameters are used to protect an entire block of bonus * buffers. If the size of dnode blocks ever becomes variable, * handling will need to be added to ensure that dnode block sizes * match on the sending and receiving side. 
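The byteorder computation at the top of receive_object_range() above composes three independent "is it swapped?" bits with XOR: the host's endianness, whether the stream as a whole was byteswapped, and whether this particular block was written in non-native order on the sender. Each swap flips the interpretation once, so XOR composes them. A sketch that prints the resulting truth table (effective_byteorder is a name invented here):

#include <stdio.h>

static int
effective_byteorder(int host_byteorder, int stream_swapped, int block_swapped)
{
	return (host_byteorder ^ stream_swapped ^ block_swapped);
}

int main(void)
{
	for (int s = 0; s <= 1; s++)
		for (int b = 0; b <= 1; b++)
			printf("host=1 stream_swapped=%d block_swapped=%d -> %d\n",
			    s, b, effective_byteorder(1, s, b));
	return (0);
}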
*/ if (drror->drr_numslots != DNODES_PER_BLOCK || P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 || !rwa->raw) return (SET_ERROR(EINVAL)); if (drror->drr_firstobj > rwa->max_object) rwa->max_object = drror->drr_firstobj; /* * The DRR_OBJECT_RANGE handling must be deferred to receive_object() * so that the block of dnodes is not written out when it's empty, * and converted to a HOLE BP. */ rwa->or_crypt_params_present = B_TRUE; rwa->or_firstobj = drror->drr_firstobj; rwa->or_numslots = drror->drr_numslots; bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN); bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN); bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN); rwa->or_byteorder = byteorder; return (0); } /* used to destroy the drc_ds on error */ static void dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc) { dsl_dataset_t *ds = drc->drc_ds; ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT; /* * Wait for the txg sync before cleaning up the receive. For * resumable receives, this ensures that our resume state has * been written out to disk. For raw receives, this ensures * that the user accounting code will not attempt to do anything * after we stopped receiving the dataset. */ txg_wait_synced(ds->ds_dir->dd_pool, 0); ds->ds_objset->os_raw_receive = B_FALSE; rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) { rrw_exit(&ds->ds_bp_rwlock, FTAG); dsl_dataset_disown(ds, dsflags, dmu_recv_tag); } else { char name[ZFS_MAX_DATASET_NAME_LEN]; rrw_exit(&ds->ds_bp_rwlock, FTAG); dsl_dataset_name(ds, name); dsl_dataset_disown(ds, dsflags, dmu_recv_tag); (void) dsl_destroy_head(name); } } static void receive_cksum(struct receive_arg *ra, int len, void *buf) { if (ra->byteswap) { (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum); } else { (void) fletcher_4_incremental_native(buf, len, &ra->cksum); } } /* * Read the payload into a buffer of size len, and update the current record's * payload field. * Allocate ra->next_rrd and read the next record's header into * ra->next_rrd->header. * Verify checksum of payload and next record. */ static int receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf) { int err; zio_cksum_t cksum_orig; zio_cksum_t *cksump; if (len != 0) { ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); err = receive_read(ra, len, buf); if (err != 0) return (err); receive_cksum(ra, len, buf); /* note: rrd is NULL when reading the begin record's payload */ if (ra->rrd != NULL) { ra->rrd->payload = buf; ra->rrd->payload_size = len; ra->rrd->bytes_read = ra->bytes_read; } } else { ASSERT3P(buf, ==, NULL); } ra->prev_cksum = ra->cksum; ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); err = receive_read(ra, sizeof (ra->next_rrd->header), &ra->next_rrd->header); ra->next_rrd->bytes_read = ra->bytes_read; if (err != 0) { kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); ra->next_rrd = NULL; return (err); } if (ra->next_rrd->header.drr_type == DRR_BEGIN) { kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); ra->next_rrd = NULL; return (SET_ERROR(EINVAL)); } /* * Note: checksum is of everything up to but not including the * checksum itself. 
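This "checksum everything before the checksum" convention works because the checksum is the last field of the record, which the code verifies with the offsetof assertion that follows. A compact userland illustration of the same layout invariant; demo_record_t and weak_sum are stand-ins invented for this sketch, where the real stream uses fletcher4:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct demo_record {
	uint32_t type;
	uint32_t len;
	uint8_t  body[48];
	uint64_t checksum;   /* must be last: it is excluded from itself */
} demo_record_t;

static uint64_t
weak_sum(const void *buf, size_t len)    /* placeholder for fletcher4 */
{
	const uint8_t *p = buf;
	uint64_t s = 0;
	while (len--)
		s = s * 31 + *p++;
	return (s);
}

int main(void)
{
	demo_record_t r;
	memset(&r, 0xab, sizeof (r));
	/* the same invariant the ASSERT below encodes */
	assert(offsetof(demo_record_t, checksum) ==
	    sizeof (demo_record_t) - sizeof (uint64_t));
	r.checksum = weak_sum(&r, offsetof(demo_record_t, checksum));
	return (0);
}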
*/ ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); receive_cksum(ra, offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), &ra->next_rrd->header); cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; if (ra->byteswap) byteswap_record(&ra->next_rrd->header); if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) { kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); ra->next_rrd = NULL; return (SET_ERROR(ECKSUM)); } receive_cksum(ra, sizeof (cksum_orig), &cksum_orig); return (0); } static void objlist_create(struct objlist *list) { list_create(&list->list, sizeof (struct receive_objnode), offsetof(struct receive_objnode, node)); list->last_lookup = 0; } static void objlist_destroy(struct objlist *list) { for (struct receive_objnode *n = list_remove_head(&list->list); n != NULL; n = list_remove_head(&list->list)) { kmem_free(n, sizeof (*n)); } list_destroy(&list->list); } /* * This function looks through the objlist to see if the specified object number * is contained in the objlist. In the process, it will remove all object * numbers in the list that are smaller than the specified object number. Thus, * any lookup of an object number smaller than a previously looked up object * number will always return false; therefore, all lookups should be done in * ascending order. */ static boolean_t objlist_exists(struct objlist *list, uint64_t object) { struct receive_objnode *node = list_head(&list->list); ASSERT3U(object, >=, list->last_lookup); list->last_lookup = object; while (node != NULL && node->object < object) { VERIFY3P(node, ==, list_remove_head(&list->list)); kmem_free(node, sizeof (*node)); node = list_head(&list->list); } return (node != NULL && node->object == object); } /* * The objlist is a list of object numbers stored in ascending order. However, * the insertion of new object numbers does not seek out the correct location to * store a new object number; instead, it appends it to the list for simplicity. * Thus, any users must take care to only insert new object numbers in ascending * order. */ static void objlist_insert(struct objlist *list, uint64_t object) { struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP); node->object = object; #ifdef ZFS_DEBUG { struct receive_objnode *last_object = list_tail(&list->list); uint64_t last_objnum = (last_object != NULL ? last_object->object : 0); ASSERT3U(node->object, >, last_objnum); } #endif list_insert_tail(&list->list, node); } /* * Issue the prefetch reads for any necessary indirect blocks. * * We use the object ignore list to tell us whether or not to issue prefetches * for a given object. We do this for both correctness (in case the blocksize * of an object has changed) and performance (if the object doesn't exist, don't * needlessly try to issue prefetches). We also trim the list as we go through * the stream to prevent it from growing to an unbounded size. * * The object numbers within will always be in sorted order, and any write * records we see will also be in sorted order, but they're not sorted with * respect to each other (i.e. we can get several object records before * receiving each object's write records). As a result, once we've reached a * given object number, we can safely remove any reference to lower object * numbers in the ignore list. 
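objlist_exists(), shown above, is destructive by design: walking forward for object N unlinks every entry below N, which is what makes ascending lookups cheap and descending lookups meaningless. A minimal model of that behavior; demo_exists is this sketch's name, the demo only unlinks stack nodes where the kernel frees them, and the kernel version additionally ASSERTs that lookups never go backwards:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct objnode { uint64_t object; struct objnode *next; };

static int
demo_exists(struct objnode **headp, uint64_t object)
{
	while (*headp != NULL && (*headp)->object < object)
		*headp = (*headp)->next;   /* trim entries below the lookup */
	return (*headp != NULL && (*headp)->object == object);
}

int main(void)
{
	struct objnode c = { 9, NULL }, b = { 5, &c }, a = { 3, &b };
	struct objnode *head = &a;

	assert(demo_exists(&head, 4) == 0);  /* trims 3 on the way */
	assert(demo_exists(&head, 5) == 1);
	assert(demo_exists(&head, 3) == 0);  /* lower lookups now miss */
	return (0);
}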
In practice, we receive up to 32 object records * before receiving write records, so the list can have up to 32 nodes in it. */ /* ARGSUSED */ static void receive_read_prefetch(struct receive_arg *ra, uint64_t object, uint64_t offset, uint64_t length) { if (!objlist_exists(&ra->ignore_objlist, object)) { dmu_prefetch(ra->os, object, 1, offset, length, ZIO_PRIORITY_SYNC_READ); } } /* * Read records off the stream, issuing any necessary prefetches. */ static int receive_read_record(struct receive_arg *ra) { int err; switch (ra->rrd->header.drr_type) { case DRR_OBJECT: { struct drr_object *drro = &ra->rrd->header.drr_u.drr_object; uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro); void *buf = NULL; dmu_object_info_t doi; if (size != 0) buf = kmem_zalloc(size, KM_SLEEP); err = receive_read_payload_and_next_header(ra, size, buf); if (err != 0) { kmem_free(buf, size); return (err); } err = dmu_object_info(ra->os, drro->drr_object, &doi); /* * See receive_read_prefetch for an explanation why we're * storing this object in the ignore_obj_list. */ if (err == ENOENT || err == EEXIST || (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { objlist_insert(&ra->ignore_objlist, drro->drr_object); err = 0; } return (err); } case DRR_FREEOBJECTS: { err = receive_read_payload_and_next_header(ra, 0, NULL); return (err); } case DRR_WRITE: { struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write; arc_buf_t *abuf; boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type); if (ra->raw) { boolean_t byteorder = ZFS_HOST_BYTEORDER ^ !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^ ra->byteswap; abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os), drrw->drr_object, byteorder, drrw->drr_salt, drrw->drr_iv, drrw->drr_mac, drrw->drr_type, drrw->drr_compressed_size, drrw->drr_logical_size, drrw->drr_compressiontype); } else if (DRR_WRITE_COMPRESSED(drrw)) { ASSERT3U(drrw->drr_compressed_size, >, 0); ASSERT3U(drrw->drr_logical_size, >=, drrw->drr_compressed_size); ASSERT(!is_meta); abuf = arc_loan_compressed_buf( dmu_objset_spa(ra->os), drrw->drr_compressed_size, drrw->drr_logical_size, drrw->drr_compressiontype); } else { abuf = arc_loan_buf(dmu_objset_spa(ra->os), is_meta, drrw->drr_logical_size); } err = receive_read_payload_and_next_header(ra, DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data); if (err != 0) { dmu_return_arcbuf(abuf); return (err); } ra->rrd->arc_buf = abuf; receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset, drrw->drr_logical_size); return (err); } case DRR_WRITE_BYREF: { struct drr_write_byref *drrwb = &ra->rrd->header.drr_u.drr_write_byref; err = receive_read_payload_and_next_header(ra, 0, NULL); receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset, drrwb->drr_length); return (err); } case DRR_WRITE_EMBEDDED: { struct drr_write_embedded *drrwe = &ra->rrd->header.drr_u.drr_write_embedded; uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); void *buf = kmem_zalloc(size, KM_SLEEP); err = receive_read_payload_and_next_header(ra, size, buf); if (err != 0) { kmem_free(buf, size); return (err); } receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length); return (err); } case DRR_FREE: { /* * It might be beneficial to prefetch indirect blocks here, but * we don't really have the data to decide for sure. 
*/ err = receive_read_payload_and_next_header(ra, 0, NULL); return (err); } case DRR_END: { struct drr_end *drre = &ra->rrd->header.drr_u.drr_end; if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum)) return (SET_ERROR(ECKSUM)); return (0); } case DRR_SPILL: { struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill; arc_buf_t *abuf; int len = DRR_SPILL_PAYLOAD_SIZE(drrs); /* DRR_SPILL records are either raw or uncompressed */ if (ra->raw) { boolean_t byteorder = ZFS_HOST_BYTEORDER ^ !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^ ra->byteswap; abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os), dmu_objset_id(ra->os), byteorder, drrs->drr_salt, drrs->drr_iv, drrs->drr_mac, drrs->drr_type, drrs->drr_compressed_size, drrs->drr_length, drrs->drr_compressiontype); } else { abuf = arc_loan_buf(dmu_objset_spa(ra->os), DMU_OT_IS_METADATA(drrs->drr_type), drrs->drr_length); } err = receive_read_payload_and_next_header(ra, len, abuf->b_data); if (err != 0) { dmu_return_arcbuf(abuf); return (err); } ra->rrd->arc_buf = abuf; return (err); } case DRR_OBJECT_RANGE: { err = receive_read_payload_and_next_header(ra, 0, NULL); return (err); } default: return (SET_ERROR(EINVAL)); } } static void dprintf_drr(struct receive_record_arg *rrd, int err) { #ifdef ZFS_DEBUG switch (rrd->header.drr_type) { case DRR_OBJECT: { struct drr_object *drro = &rrd->header.drr_u.drr_object; dprintf("drr_type = OBJECT obj = %llu type = %u " "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u " "compress = %u dn_slots = %u err = %d\n", drro->drr_object, drro->drr_type, drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen, drro->drr_checksumtype, drro->drr_compress, drro->drr_dn_slots, err); break; } case DRR_FREEOBJECTS: { struct drr_freeobjects *drrfo = &rrd->header.drr_u.drr_freeobjects; dprintf("drr_type = FREEOBJECTS firstobj = %llu " "numobjs = %llu err = %d\n", drrfo->drr_firstobj, drrfo->drr_numobjs, err); break; } case DRR_WRITE: { struct drr_write *drrw = &rrd->header.drr_u.drr_write; dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu " "lsize = %llu cksumtype = %u cksumflags = %u " "compress = %u psize = %llu err = %d\n", drrw->drr_object, drrw->drr_type, drrw->drr_offset, drrw->drr_logical_size, drrw->drr_checksumtype, drrw->drr_flags, drrw->drr_compressiontype, drrw->drr_compressed_size, err); break; } case DRR_WRITE_BYREF: { struct drr_write_byref *drrwbr = &rrd->header.drr_u.drr_write_byref; dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu " "length = %llu toguid = %llx refguid = %llx " "refobject = %llu refoffset = %llu cksumtype = %u " "cksumflags = %u err = %d\n", drrwbr->drr_object, drrwbr->drr_offset, drrwbr->drr_length, drrwbr->drr_toguid, drrwbr->drr_refguid, drrwbr->drr_refobject, drrwbr->drr_refoffset, drrwbr->drr_checksumtype, drrwbr->drr_flags, err); break; } case DRR_WRITE_EMBEDDED: { struct drr_write_embedded *drrwe = &rrd->header.drr_u.drr_write_embedded; dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu " "length = %llu compress = %u etype = %u lsize = %u " "psize = %u err = %d\n", drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length, drrwe->drr_compression, drrwe->drr_etype, drrwe->drr_lsize, drrwe->drr_psize, err); break; } case DRR_FREE: { struct drr_free *drrf = &rrd->header.drr_u.drr_free; dprintf("drr_type = FREE obj = %llu offset = %llu " "length = %lld err = %d\n", drrf->drr_object, drrf->drr_offset, drrf->drr_length, err); break; } case DRR_SPILL: { struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; dprintf("drr_type = SPILL obj = %llu length = 
%llu " "err = %d\n", drrs->drr_object, drrs->drr_length, err); break; } default: return; } #endif } /* * Commit the records to the pool. */ static int receive_process_record(struct receive_writer_arg *rwa, struct receive_record_arg *rrd) { int err; /* Processing in order, therefore bytes_read should be increasing. */ ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read); rwa->bytes_read = rrd->bytes_read; switch (rrd->header.drr_type) { case DRR_OBJECT: { struct drr_object *drro = &rrd->header.drr_u.drr_object; err = receive_object(rwa, drro, rrd->payload); kmem_free(rrd->payload, rrd->payload_size); rrd->payload = NULL; break; } case DRR_FREEOBJECTS: { struct drr_freeobjects *drrfo = &rrd->header.drr_u.drr_freeobjects; err = receive_freeobjects(rwa, drrfo); break; } case DRR_WRITE: { struct drr_write *drrw = &rrd->header.drr_u.drr_write; err = receive_write(rwa, drrw, rrd->arc_buf); /* if receive_write() is successful, it consumes the arc_buf */ if (err != 0) dmu_return_arcbuf(rrd->arc_buf); rrd->arc_buf = NULL; rrd->payload = NULL; break; } case DRR_WRITE_BYREF: { struct drr_write_byref *drrwbr = &rrd->header.drr_u.drr_write_byref; err = receive_write_byref(rwa, drrwbr); break; } case DRR_WRITE_EMBEDDED: { struct drr_write_embedded *drrwe = &rrd->header.drr_u.drr_write_embedded; err = receive_write_embedded(rwa, drrwe, rrd->payload); kmem_free(rrd->payload, rrd->payload_size); rrd->payload = NULL; break; } case DRR_FREE: { struct drr_free *drrf = &rrd->header.drr_u.drr_free; err = receive_free(rwa, drrf); break; } case DRR_SPILL: { struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; err = receive_spill(rwa, drrs, rrd->arc_buf); /* if receive_spill() is successful, it consumes the arc_buf */ if (err != 0) dmu_return_arcbuf(rrd->arc_buf); rrd->arc_buf = NULL; rrd->payload = NULL; break; } case DRR_OBJECT_RANGE: { struct drr_object_range *drror = &rrd->header.drr_u.drr_object_range; return (receive_object_range(rwa, drror)); } default: return (SET_ERROR(EINVAL)); } if (err != 0) dprintf_drr(rrd, err); return (err); } /* * dmu_recv_stream's worker thread; pull records off the queue, and then call * receive_process_record When we're done, signal the main thread and exit. */ static void receive_writer_thread(void *arg) { struct receive_writer_arg *rwa = arg; struct receive_record_arg *rrd; fstrans_cookie_t cookie = spl_fstrans_mark(); for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; rrd = bqueue_dequeue(&rwa->q)) { /* * If there's an error, the main thread will stop putting things * on the queue, but we need to clear everything in it before we * can exit. 
*/ if (rwa->err == 0) { rwa->err = receive_process_record(rwa, rrd); } else if (rrd->arc_buf != NULL) { dmu_return_arcbuf(rrd->arc_buf); rrd->arc_buf = NULL; rrd->payload = NULL; } else if (rrd->payload != NULL) { kmem_free(rrd->payload, rrd->payload_size); rrd->payload = NULL; } kmem_free(rrd, sizeof (*rrd)); } kmem_free(rrd, sizeof (*rrd)); mutex_enter(&rwa->mutex); rwa->done = B_TRUE; cv_signal(&rwa->cv); mutex_exit(&rwa->mutex); spl_fstrans_unmark(cookie); thread_exit(); } static int resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) { uint64_t val; objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; uint64_t dsobj = dmu_objset_id(ra->os); uint64_t resume_obj, resume_off; if (nvlist_lookup_uint64(begin_nvl, "resume_object", &resume_obj) != 0 || nvlist_lookup_uint64(begin_nvl, "resume_offset", &resume_off) != 0) { return (SET_ERROR(EINVAL)); } VERIFY0(zap_lookup(mos, dsobj, DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); if (resume_obj != val) return (SET_ERROR(EINVAL)); VERIFY0(zap_lookup(mos, dsobj, DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); if (resume_off != val) return (SET_ERROR(EINVAL)); return (0); } /* * Read in the stream's records, one by one, and apply them to the pool. There * are two threads involved; the thread that calls this function will spin up a * worker thread, read the records off the stream one by one, and issue * prefetches for any necessary indirect blocks. It will then push the records * onto an internal blocking queue. The worker thread will pull the records off * the queue, and actually write the data into the DMU. This way, the worker * thread doesn't have to wait for reads to complete, since everything it needs * (the indirect blocks) will be prefetched. * * NB: callers *must* call dmu_recv_end() if this succeeds. */ int dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp, int cleanup_fd, uint64_t *action_handlep) { int err = 0; struct receive_arg *ra; struct receive_writer_arg *rwa; int featureflags; uint32_t payloadlen; void *payload; nvlist_t *begin_nvl = NULL; ra = kmem_zalloc(sizeof (*ra), KM_SLEEP); rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP); ra->byteswap = drc->drc_byteswap; ra->raw = drc->drc_raw; ra->cksum = drc->drc_cksum; ra->vp = vp; ra->voff = *voffp; if (dsl_dataset_is_zapified(drc->drc_ds)) { (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, sizeof (ra->bytes_read), 1, &ra->bytes_read); } objlist_create(&ra->ignore_objlist); /* these were verified in dmu_recv_begin */ ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, DMU_SUBSTREAM); ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); /* * Open the objset we are modifying. 
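resume_check() above pulls resume_object and resume_offset out of the BEGIN record's nvlist payload and compares them against the ZAP state saved by save_resume_state(). For readers unfamiliar with nvlists, a userland sketch of the payload side; it links against libnvpair, the field names match the ones looked up above, and the values are made up:

#include <libnvpair.h>
#include <stdio.h>

int main(void)
{
	nvlist_t *nvl;
	uint64_t obj = 0, off = 0;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	if (nvlist_add_uint64(nvl, "resume_object", 127) != 0 ||
	    nvlist_add_uint64(nvl, "resume_offset", 131072) != 0)
		return (1);

	if (nvlist_lookup_uint64(nvl, "resume_object", &obj) == 0 &&
	    nvlist_lookup_uint64(nvl, "resume_offset", &off) == 0)
		printf("resume at object %llu offset %llu\n",
		    (unsigned long long)obj, (unsigned long long)off);
	nvlist_free(nvl);
	return (0);
}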
*/ VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os)); ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); ra->featureflags = featureflags; ASSERT0(ra->os->os_encrypted && (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)); /* if this stream is dedup'ed, set up the avl tree for guid mapping */ if (featureflags & DMU_BACKUP_FEATURE_DEDUP) { minor_t minor; if (cleanup_fd == -1) { err = SET_ERROR(EBADF); goto out; } err = zfs_onexit_fd_hold(cleanup_fd, &minor); if (err != 0) { cleanup_fd = -1; goto out; } if (*action_handlep == 0) { rwa->guid_to_ds_map = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); avl_create(rwa->guid_to_ds_map, guid_compare, sizeof (guid_map_entry_t), offsetof(guid_map_entry_t, avlnode)); err = zfs_onexit_add_cb(minor, free_guid_map_onexit, rwa->guid_to_ds_map, action_handlep); if (err != 0) goto out; } else { err = zfs_onexit_cb_data(minor, *action_handlep, (void **)&rwa->guid_to_ds_map); if (err != 0) goto out; } drc->drc_guid_to_ds_map = rwa->guid_to_ds_map; } payloadlen = drc->drc_drr_begin->drr_payloadlen; payload = NULL; if (payloadlen != 0) payload = kmem_alloc(payloadlen, KM_SLEEP); err = receive_read_payload_and_next_header(ra, payloadlen, payload); if (err != 0) { if (payloadlen != 0) kmem_free(payload, payloadlen); goto out; } if (payloadlen != 0) { err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP); kmem_free(payload, payloadlen); if (err != 0) goto out; } /* handle DSL encryption key payload */ if (featureflags & DMU_BACKUP_FEATURE_RAW) { nvlist_t *keynvl = NULL; ASSERT(ra->os->os_encrypted); ASSERT(drc->drc_raw); err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl); if (err != 0) goto out; /* * If this is a new dataset we set the key immediately. * Otherwise we don't want to change the key until we * are sure the rest of the receive succeeded so we stash * the keynvl away until then. */ err = dsl_crypto_recv_raw(spa_name(ra->os->os_spa), drc->drc_ds->ds_object, drc->drc_drrb->drr_type, keynvl, drc->drc_newfs); if (err != 0) goto out; if (!drc->drc_newfs) drc->drc_keynvl = fnvlist_dup(keynvl); } if (featureflags & DMU_BACKUP_FEATURE_RESUMING) { err = resume_check(ra, begin_nvl); if (err != 0) goto out; } (void) bqueue_init(&rwa->q, MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize), offsetof(struct receive_record_arg, node)); cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL); mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL); rwa->os = ra->os; rwa->byteswap = drc->drc_byteswap; rwa->resumable = drc->drc_resumable; rwa->raw = drc->drc_raw; rwa->os->os_raw_receive = drc->drc_raw; (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc, TS_RUN, minclsyspri); /* * We're reading rwa->err without locks, which is safe since we are the * only reader, and the worker thread is the only writer. It's ok if we * miss a write for an iteration or two of the loop, since the writer * thread will keep freeing records we send it until we send it an eos * marker. * * We can leave this loop in 3 ways: First, if rwa->err is * non-zero. In that case, the writer thread will free the rrd we just * pushed. Second, if we're interrupted; in that case, either it's the * first loop and ra->rrd was never allocated, or it's later and ra->rrd * has been handed off to the writer thread who will free it. Finally, * if receive_read_record fails or we're at the end of the stream, then * we free ra->rrd and exit. 
*/ while (rwa->err == 0) { if (issig(JUSTLOOKING) && issig(FORREAL)) { err = SET_ERROR(EINTR); break; } ASSERT3P(ra->rrd, ==, NULL); ra->rrd = ra->next_rrd; ra->next_rrd = NULL; /* Allocates and loads header into ra->next_rrd */ err = receive_read_record(ra); if (ra->rrd->header.drr_type == DRR_END || err != 0) { kmem_free(ra->rrd, sizeof (*ra->rrd)); ra->rrd = NULL; break; } bqueue_enqueue(&rwa->q, ra->rrd, sizeof (struct receive_record_arg) + ra->rrd->payload_size); ra->rrd = NULL; } if (ra->next_rrd == NULL) ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); ra->next_rrd->eos_marker = B_TRUE; bqueue_enqueue(&rwa->q, ra->next_rrd, 1); mutex_enter(&rwa->mutex); while (!rwa->done) { cv_wait(&rwa->cv, &rwa->mutex); } mutex_exit(&rwa->mutex); /* * If we are receiving a full stream as a clone, all object IDs which * are greater than the maximum ID referenced in the stream are * by definition unused and must be freed. */ if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { uint64_t obj = rwa->max_object + 1; int free_err = 0; int next_err = 0; while (next_err == 0) { free_err = dmu_free_long_object(rwa->os, obj); if (free_err != 0 && free_err != ENOENT) break; next_err = dmu_object_next(rwa->os, &obj, FALSE, 0); } if (err == 0) { if (free_err != 0 && free_err != ENOENT) err = free_err; else if (next_err != ESRCH) err = next_err; } } cv_destroy(&rwa->cv); mutex_destroy(&rwa->mutex); bqueue_destroy(&rwa->q); if (err == 0) err = rwa->err; out: nvlist_free(begin_nvl); if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) zfs_onexit_fd_rele(cleanup_fd); if (err != 0) { /* * Clean up references. If receive is not resumable, * destroy what we created, so we don't leave it in * the inconsistent state. */ dmu_recv_cleanup_ds(drc); nvlist_free(drc->drc_keynvl); } *voffp = ra->voff; objlist_destroy(&ra->ignore_objlist); kmem_free(ra, sizeof (*ra)); kmem_free(rwa, sizeof (*rwa)); return (err); } static int dmu_recv_end_check(void *arg, dmu_tx_t *tx) { dmu_recv_cookie_t *drc = arg; dsl_pool_t *dp = dmu_tx_pool(tx); int error; ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); if (!drc->drc_newfs) { dsl_dataset_t *origin_head; error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); if (error != 0) return (error); if (drc->drc_force) { /* * We will destroy any snapshots in tofs (i.e. before * origin_head) that are after the origin (which is * the snap before drc_ds, because drc_ds can not * have any snaps of its own). 
*/ uint64_t obj; obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; while (obj != dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { dsl_dataset_t *snap; error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap); if (error != 0) break; if (snap->ds_dir != origin_head->ds_dir) error = SET_ERROR(EINVAL); if (error == 0) { error = dsl_destroy_snapshot_check_impl( snap, B_FALSE); } obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; dsl_dataset_rele(snap, FTAG); if (error != 0) break; } if (error != 0) { dsl_dataset_rele(origin_head, FTAG); return (error); } } if (drc->drc_keynvl != NULL) { error = dsl_crypto_recv_raw_key_check(drc->drc_ds, drc->drc_keynvl, tx); if (error != 0) { dsl_dataset_rele(origin_head, FTAG); return (error); } } error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, origin_head, drc->drc_force, drc->drc_owner, tx); if (error != 0) { dsl_dataset_rele(origin_head, FTAG); return (error); } error = dsl_dataset_snapshot_check_impl(origin_head, drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); dsl_dataset_rele(origin_head, FTAG); if (error != 0) return (error); error = dsl_destroy_head_check_impl(drc->drc_ds, 1); } else { error = dsl_dataset_snapshot_check_impl(drc->drc_ds, drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); } return (error); } static void dmu_recv_end_sync(void *arg, dmu_tx_t *tx) { dmu_recv_cookie_t *drc = arg; dsl_pool_t *dp = dmu_tx_pool(tx); boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0; spa_history_log_internal_ds(drc->drc_ds, "finish receiving", tx, "snap=%s", drc->drc_tosnap); drc->drc_ds->ds_objset->os_raw_receive = B_FALSE; if (!drc->drc_newfs) { dsl_dataset_t *origin_head; VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head)); if (drc->drc_force) { /* * Destroy any snapshots of drc_tofs (origin_head) * after the origin (the snap before drc_ds). 
*/ uint64_t obj; obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; while (obj != dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { dsl_dataset_t *snap; VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &snap)); ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; dsl_destroy_snapshot_sync_impl(snap, B_FALSE, tx); dsl_dataset_rele(snap, FTAG); } } if (drc->drc_keynvl != NULL) { dsl_crypto_recv_raw_key_sync(drc->drc_ds, drc->drc_keynvl, tx); nvlist_free(drc->drc_keynvl); drc->drc_keynvl = NULL; } VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev); dsl_dataset_clone_swap_sync_impl(drc->drc_ds, origin_head, tx); dsl_dataset_snapshot_sync_impl(origin_head, drc->drc_tosnap, tx); /* set snapshot's creation time and guid */ dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = drc->drc_drrb->drr_creation_time; dsl_dataset_phys(origin_head->ds_prev)->ds_guid = drc->drc_drrb->drr_toguid; dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= ~DS_FLAG_INCONSISTENT; dmu_buf_will_dirty(origin_head->ds_dbuf, tx); dsl_dataset_phys(origin_head)->ds_flags &= ~DS_FLAG_INCONSISTENT; drc->drc_newsnapobj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; dsl_dataset_rele(origin_head, FTAG); dsl_destroy_head_sync_impl(drc->drc_ds, tx); if (drc->drc_owner != NULL) VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); } else { dsl_dataset_t *ds = drc->drc_ds; dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); /* set snapshot's creation time and guid */ dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); dsl_dataset_phys(ds->ds_prev)->ds_creation_time = drc->drc_drrb->drr_creation_time; dsl_dataset_phys(ds->ds_prev)->ds_guid = drc->drc_drrb->drr_toguid; dsl_dataset_phys(ds->ds_prev)->ds_flags &= ~DS_FLAG_INCONSISTENT; dmu_buf_will_dirty(ds->ds_dbuf, tx); dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; if (dsl_dataset_has_resume_receive_state(ds)) { (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_FROMGUID, tx); (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_OBJECT, tx); (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_OFFSET, tx); (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_BYTES, tx); (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_TOGUID, tx); (void) zap_remove(dp->dp_meta_objset, ds->ds_object, DS_FIELD_RESUME_TONAME, tx); } drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; } zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE); /* * Release the hold from dmu_recv_begin. This must be done before * we return to open context, so that when we free the dataset's dnode * we can evict its bonus buffer. Since the dataset may be destroyed * at this point (and therefore won't have a valid pointer to the spa) * we release the key mapping manually here while we do have a valid * pointer, if it exists. */ if (!drc->drc_raw && encrypted) { (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa, drc->drc_ds->ds_object, drc->drc_ds); } dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag); drc->drc_ds = NULL; } static int add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj, boolean_t raw) { dsl_pool_t *dp; dsl_dataset_t *snapds; guid_map_entry_t *gmep; objset_t *os; ds_hold_flags_t dsflags = (raw) ? 
0 : DS_HOLD_FLAG_DECRYPT; int err; ASSERT(guid_map != NULL); err = dsl_pool_hold(name, FTAG, &dp); if (err != 0) return (err); gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); err = dsl_dataset_own_obj(dp, snapobj, dsflags, gmep, &snapds); if (err == 0) { /* * If this is a deduplicated raw send stream, we need * to make sure that we can still read raw blocks from * earlier datasets in the stream, so we set the * os_raw_receive flag now. */ if (raw) { err = dmu_objset_from_ds(snapds, &os); if (err != 0) { dsl_dataset_disown(snapds, dsflags, FTAG); dsl_pool_rele(dp, FTAG); kmem_free(gmep, sizeof (*gmep)); return (err); } os->os_raw_receive = B_TRUE; } gmep->raw = raw; gmep->guid = dsl_dataset_phys(snapds)->ds_guid; gmep->gme_ds = snapds; avl_add(guid_map, gmep); } else { kmem_free(gmep, sizeof (*gmep)); } dsl_pool_rele(dp, FTAG); return (err); } static int dmu_recv_end_modified_blocks = 3; static int dmu_recv_existing_end(dmu_recv_cookie_t *drc) { #ifdef _KERNEL /* * We will be destroying the ds; make sure its origin is unmounted if * necessary. */ char name[ZFS_MAX_DATASET_NAME_LEN]; dsl_dataset_name(drc->drc_ds, name); zfs_destroy_unmount_origin(name); #endif return (dsl_sync_task(drc->drc_tofs, dmu_recv_end_check, dmu_recv_end_sync, drc, dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); } static int dmu_recv_new_end(dmu_recv_cookie_t *drc) { return (dsl_sync_task(drc->drc_tofs, dmu_recv_end_check, dmu_recv_end_sync, drc, dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); } int dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) { int error; drc->drc_owner = owner; if (drc->drc_newfs) error = dmu_recv_new_end(drc); else error = dmu_recv_existing_end(drc); if (error != 0) { dmu_recv_cleanup_ds(drc); nvlist_free(drc->drc_keynvl); } else if (drc->drc_guid_to_ds_map != NULL) { (void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map, drc->drc_newsnapobj, drc->drc_raw); } return (error); } /* * Return TRUE if this objset is currently being received into. */ boolean_t dmu_objset_is_receiving(objset_t *os) { return (os->os_dsl_dataset != NULL && os->os_dsl_dataset->ds_owner == dmu_recv_tag); } #if defined(_KERNEL) module_param(zfs_recv_queue_length, int, 0644); MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length"); #endif diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c index 5b6ce0420dea..b3b677fb85f5 100644 --- a/module/zfs/dsl_dir.c +++ b/module/zfs/dsl_dir.c @@ -1,2204 +1,2230 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2017 by Delphix. All rights reserved. * Copyright (c) 2013 Martin Matuska. All rights reserved. * Copyright (c) 2014 Joyent, Inc. All rights reserved. 
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
+ * Copyright (c) 2018, loli10K . All rights reserved.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "zfs_namecheck.h"
#include "zfs_prop.h"

/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The filesystem and snapshot counts are stored as extensible properties. This
 * capability is controlled by a feature flag and must be enabled to be used.
 * Once enabled, the feature is not active until the first limit is set. At
 * that point, future operations to create/destroy filesystems or snapshots
 * will validate and update the counts.
 *
 * Because the count properties will not exist before the feature is active,
 * the counts are updated when a limit is first set on an uninitialized
 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
 * snapshot count properties on a node indicate uninitialized counts on that
 * node.) When first setting a limit on an uninitialized node, the code starts
 * at the filesystem with the new limit and descends into all sub-filesystems
 * to add the count properties.
 *
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized, unless a limit is
 * eventually set on one of those filesystems. The counts are always recursively
 * updated when a limit is set on a dataset, unless there is already a limit.
 * When a new limit value is set on a filesystem with an existing limit, it is
 * possible for the new limit to be less than the current count at that level
 * since a user who can change the limit is also allowed to exceed the limit.
 *
 * Once the feature is active, then whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
 * the initialized nodes in the tree. Renaming a filesystem into a different
 * point in the tree will first validate, then update the counts on each branch
 * up to the common ancestor. A receive will also validate the counts and then
 * update them.
* * An exception to the above behavior is that the limit is not enforced if the * user has permission to modify the limit. This is primarily so that * recursive snapshots in the global zone always work. We want to prevent a * denial-of-service in which a lower level delegated dataset could max out its * limit and thus block recursive snapshots from being taken in the global zone. * Because of this, it is possible for the snapshot count to be over the limit * and snapshots taken in the global zone could cause a lower level dataset to * hit or exceed its limit. The administrator taking the global zone recursive * snapshot should be aware of this side-effect and behave accordingly. * For consistency, the filesystem limit is also not enforced if the user can * modify the limit. * * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check() * and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by * dsl_dir_init_fs_ss_count(). * * There is a special case when we receive a filesystem that already exists. In * this case a temporary clone name of %X is created (see dmu_recv_begin). We * never update the filesystem counts for temporary clones. * * Likewise, we do not update the snapshot counts for temporary snapshots, * such as those created by zfs diff. */ extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd); static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd); typedef struct ddulrt_arg { dsl_dir_t *ddulrta_dd; uint64_t ddlrta_txg; } ddulrt_arg_t; static void dsl_dir_evict_async(void *dbu) { dsl_dir_t *dd = dbu; int t; ASSERTV(dsl_pool_t *dp = dd->dd_pool); dd->dd_dbuf = NULL; for (t = 0; t < TXG_SIZE; t++) { ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t)); ASSERT(dd->dd_tempreserved[t] == 0); ASSERT(dd->dd_space_towrite[t] == 0); } if (dd->dd_parent) dsl_dir_async_rele(dd->dd_parent, dd); spa_async_close(dd->dd_pool->dp_spa, dd); dsl_prop_fini(dd); mutex_destroy(&dd->dd_lock); kmem_free(dd, sizeof (dsl_dir_t)); } int dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj, const char *tail, void *tag, dsl_dir_t **ddp) { dmu_buf_t *dbuf; dsl_dir_t *dd; dmu_object_info_t doi; int err; ASSERT(dsl_pool_config_held(dp)); err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf); if (err != 0) return (err); dd = dmu_buf_get_user(dbuf); dmu_object_info_from_db(dbuf, &doi); ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR); ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t)); if (dd == NULL) { dsl_dir_t *winner; dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP); dd->dd_object = ddobj; dd->dd_dbuf = dbuf; dd->dd_pool = dp; if (dsl_dir_is_zapified(dd) && zap_contains(dp->dp_meta_objset, ddobj, DD_FIELD_CRYPTO_KEY_OBJ) == 0) { VERIFY0(zap_lookup(dp->dp_meta_objset, ddobj, DD_FIELD_CRYPTO_KEY_OBJ, sizeof (uint64_t), 1, &dd->dd_crypto_obj)); /* check for on-disk format errata */ if (dsl_dir_incompatible_encryption_version(dd)) { dp->dp_spa->spa_errata = ZPOOL_ERRATA_ZOL_6845_ENCRYPTION; } } mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL); dsl_prop_init(dd); dsl_dir_snap_cmtime_update(dd); if (dsl_dir_phys(dd)->dd_parent_obj) { err = dsl_dir_hold_obj(dp, dsl_dir_phys(dd)->dd_parent_obj, NULL, dd, &dd->dd_parent); if (err != 0) goto errout; if (tail) { #ifdef ZFS_DEBUG uint64_t foundobj; err = zap_lookup(dp->dp_meta_objset, dsl_dir_phys(dd->dd_parent)-> dd_child_dir_zapobj, tail, sizeof (foundobj), 1, &foundobj); ASSERT(err || foundobj == ddobj); #endif (void) strlcpy(dd->dd_myname, tail, 
sizeof (dd->dd_myname)); } else { err = zap_value_search(dp->dp_meta_objset, dsl_dir_phys(dd->dd_parent)-> dd_child_dir_zapobj, ddobj, 0, dd->dd_myname); } if (err != 0) goto errout; } else { (void) strcpy(dd->dd_myname, spa_name(dp->dp_spa)); } if (dsl_dir_is_clone(dd)) { dmu_buf_t *origin_bonus; dsl_dataset_phys_t *origin_phys; /* * We can't open the origin dataset, because * that would require opening this dsl_dir. * Just look at its phys directly instead. */ err = dmu_bonus_hold(dp->dp_meta_objset, dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_bonus); if (err != 0) goto errout; origin_phys = origin_bonus->db_data; dd->dd_origin_txg = origin_phys->ds_creation_txg; dmu_buf_rele(origin_bonus, FTAG); } dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async, &dd->dd_dbuf); winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu); if (winner != NULL) { if (dd->dd_parent) dsl_dir_rele(dd->dd_parent, dd); dsl_prop_fini(dd); mutex_destroy(&dd->dd_lock); kmem_free(dd, sizeof (dsl_dir_t)); dd = winner; } else { spa_open_ref(dp->dp_spa, dd); } } /* * The dsl_dir_t has both open-to-close and instantiate-to-evict * holds on the spa. We need the open-to-close holds because * otherwise the spa_refcnt wouldn't change when we open a * dir which the spa also has open, so we could incorrectly * think it was OK to unload/export/destroy the pool. We need * the instantiate-to-evict hold because the dsl_dir_t has a * pointer to the dd_pool, which has a pointer to the spa_t. */ spa_open_ref(dp->dp_spa, tag); ASSERT3P(dd->dd_pool, ==, dp); ASSERT3U(dd->dd_object, ==, ddobj); ASSERT3P(dd->dd_dbuf, ==, dbuf); *ddp = dd; return (0); errout: if (dd->dd_parent) dsl_dir_rele(dd->dd_parent, dd); dsl_prop_fini(dd); mutex_destroy(&dd->dd_lock); kmem_free(dd, sizeof (dsl_dir_t)); dmu_buf_rele(dbuf, tag); return (err); } void dsl_dir_rele(dsl_dir_t *dd, void *tag) { dprintf_dd(dd, "%s\n", ""); spa_close(dd->dd_pool->dp_spa, tag); dmu_buf_rele(dd->dd_dbuf, tag); } /* * Remove a reference to the given dsl dir that is being asynchronously * released. Async releases occur from a taskq performing eviction of * dsl datasets and dirs. This process is identical to a normal release * with the exception of using the async API for releasing the reference on * the spa. 
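 *
 * For reference, a normal hold/release pairing looks like the following
 * (an illustrative sketch, not code from this change; the pool config
 * lock must be held, as asserted by dsl_dir_hold_obj()):
 *
 *	dsl_dir_t *dd;
 *	int err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
 *	if (err == 0) {
 *		...use dd...
 *		dsl_dir_rele(dd, FTAG);
 *	}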
*/ void dsl_dir_async_rele(dsl_dir_t *dd, void *tag) { dprintf_dd(dd, "%s\n", ""); spa_async_close(dd->dd_pool->dp_spa, tag); dmu_buf_rele(dd->dd_dbuf, tag); } /* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */ void dsl_dir_name(dsl_dir_t *dd, char *buf) { if (dd->dd_parent) { dsl_dir_name(dd->dd_parent, buf); VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); } else { buf[0] = '\0'; } if (!MUTEX_HELD(&dd->dd_lock)) { /* * recursive mutex so that we can use * dprintf_dd() with dd_lock held */ mutex_enter(&dd->dd_lock); VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); mutex_exit(&dd->dd_lock); } else { VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); } } /* Calculate name length, avoiding all the strcat calls of dsl_dir_name */ int dsl_dir_namelen(dsl_dir_t *dd) { int result = 0; if (dd->dd_parent) { /* parent's name + 1 for the "/" */ result = dsl_dir_namelen(dd->dd_parent) + 1; } if (!MUTEX_HELD(&dd->dd_lock)) { /* see dsl_dir_name */ mutex_enter(&dd->dd_lock); result += strlen(dd->dd_myname); mutex_exit(&dd->dd_lock); } else { result += strlen(dd->dd_myname); } return (result); } static int getcomponent(const char *path, char *component, const char **nextp) { char *p; if ((path == NULL) || (path[0] == '\0')) return (SET_ERROR(ENOENT)); /* This would be a good place to reserve some namespace... */ p = strpbrk(path, "/@"); if (p && (p[1] == '/' || p[1] == '@')) { /* two separators in a row */ return (SET_ERROR(EINVAL)); } if (p == NULL || p == path) { /* * if the first thing is an @ or /, it had better be an * @ and it had better not have any more ats or slashes, * and it had better have something after the @. */ if (p != NULL && (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0')) return (SET_ERROR(EINVAL)); if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); (void) strcpy(component, path); p = NULL; } else if (p[0] == '/') { if (p - path >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); (void) strncpy(component, path, p - path); component[p - path] = '\0'; p++; } else if (p[0] == '@') { /* * if the next separator is an @, there better not be * any more slashes. */ if (strchr(path, '/')) return (SET_ERROR(EINVAL)); if (p - path >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); (void) strncpy(component, path, p - path); component[p - path] = '\0'; } else { panic("invalid p=%p", (void *)p); } *nextp = p; return (0); } /* * Return the dsl_dir_t, and possibly the last component which couldn't * be found in *tail. The name must be in the specified dsl_pool_t. This * thread must hold the dp_config_rwlock for the pool. Returns NULL if the * path is bogus, or if tail==NULL and we couldn't parse the whole name. * (*tail)[0] == '@' means that the last component is a snapshot. */ int dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag, dsl_dir_t **ddp, const char **tailp) { char *buf; const char *spaname, *next, *nextnext = NULL; int err; dsl_dir_t *dd; uint64_t ddobj; buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); err = getcomponent(name, buf, &next); if (err != 0) goto error; /* Make sure the name is in the specified pool. 
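 * For example (an illustrative walk-through of getcomponent() above):
 * holding "tank/a/b" first yields the component "tank" with next
 * pointing at "a/b"; the loop below then resolves "a" and "b" through
 * each child dir ZAP in turn. For "tank/a@snap" the loop stops at the
 * '@' component and *tailp is set to "@snap".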
*/ spaname = spa_name(dp->dp_spa); if (strcmp(buf, spaname) != 0) { err = SET_ERROR(EXDEV); goto error; } ASSERT(dsl_pool_config_held(dp)); err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd); if (err != 0) { goto error; } while (next != NULL) { dsl_dir_t *child_dd; err = getcomponent(next, buf, &nextnext); if (err != 0) break; ASSERT(next[0] != '\0'); if (next[0] == '@') break; dprintf("looking up %s in obj%lld\n", buf, dsl_dir_phys(dd)->dd_child_dir_zapobj); err = zap_lookup(dp->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj, buf, sizeof (ddobj), 1, &ddobj); if (err != 0) { if (err == ENOENT) err = 0; break; } err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd); if (err != 0) break; dsl_dir_rele(dd, tag); dd = child_dd; next = nextnext; } if (err != 0) { dsl_dir_rele(dd, tag); goto error; } /* * It's an error if there's more than one component left, or * tailp==NULL and there's any component left. */ if (next != NULL && (tailp == NULL || (nextnext && nextnext[0] != '\0'))) { /* bad path name */ dsl_dir_rele(dd, tag); dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp); err = SET_ERROR(ENOENT); } if (tailp != NULL) *tailp = next; if (err == 0) *ddp = dd; error: kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN); return (err); } /* * If the counts are already initialized for this filesystem and its * descendants then do nothing, otherwise initialize the counts. * * The counts on this filesystem, and those below, may be uninitialized due to * either the use of a pre-existing pool which did not support the * filesystem/snapshot limit feature, or one in which the feature had not yet * been enabled. * * Recursively descend the filesystem tree and update the filesystem/snapshot * counts on each filesystem below, then update the cumulative count on the * current filesystem. If the filesystem already has a count set on it, * then we know that its counts, and the counts on the filesystems below it, * are already correct, so we don't have to update this filesystem. */ static void dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx) { uint64_t my_fs_cnt = 0; uint64_t my_ss_cnt = 0; dsl_pool_t *dp = dd->dd_pool; objset_t *os = dp->dp_meta_objset; zap_cursor_t *zc; zap_attribute_t *za; dsl_dataset_t *ds; ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)); ASSERT(dsl_pool_config_held(dp)); ASSERT(dmu_tx_is_syncing(tx)); dsl_dir_zapify(dd, tx); /* * If the filesystem count has already been initialized then we * don't need to recurse down any further. */ if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0) return; zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); /* Iterate my child dirs */ for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj); zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) { dsl_dir_t *chld_dd; uint64_t count; VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG, &chld_dd)); /* * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and * temporary datasets. 
*/ if (chld_dd->dd_myname[0] == '$' || chld_dd->dd_myname[0] == '%') { dsl_dir_rele(chld_dd, FTAG); continue; } my_fs_cnt++; /* count this child */ dsl_dir_init_fs_ss_count(chld_dd, tx); VERIFY0(zap_lookup(os, chld_dd->dd_object, DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count)); my_fs_cnt += count; VERIFY0(zap_lookup(os, chld_dd->dd_object, DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count)); my_ss_cnt += count; dsl_dir_rele(chld_dd, FTAG); } zap_cursor_fini(zc); /* Count my snapshots (we counted children's snapshots above) */ VERIFY0(dsl_dataset_hold_obj(dd->dd_pool, dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds)); for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj); zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) { /* Don't count temporary snapshots */ if (za->za_name[0] != '%') my_ss_cnt++; } zap_cursor_fini(zc); dsl_dataset_rele(ds, FTAG); kmem_free(zc, sizeof (zap_cursor_t)); kmem_free(za, sizeof (zap_attribute_t)); /* we're in a sync task, update counts */ dmu_buf_will_dirty(dd->dd_dbuf, tx); VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT, sizeof (my_fs_cnt), 1, &my_fs_cnt, tx)); VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT, sizeof (my_ss_cnt), 1, &my_ss_cnt, tx)); } static int dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx) { char *ddname = (char *)arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; dsl_dir_t *dd; int error; error = dsl_dataset_hold(dp, ddname, FTAG, &ds); if (error != 0) return (error); if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) { dsl_dataset_rele(ds, FTAG); return (SET_ERROR(ENOTSUP)); } dd = ds->ds_dir; if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) && dsl_dir_is_zapified(dd) && zap_contains(dp->dp_meta_objset, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0) { dsl_dataset_rele(ds, FTAG); return (SET_ERROR(EALREADY)); } dsl_dataset_rele(ds, FTAG); return (0); } static void dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx) { char *ddname = (char *)arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; spa_t *spa; VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds)); spa = dsl_dataset_get_spa(ds); if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) { /* * Since the feature was not active and we're now setting a * limit, increment the feature-active counter so that the * feature becomes active for the first time. * * We are already in a sync task so we can update the MOS. */ spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx); } /* * Since we are now setting a non-UINT64_MAX limit on the filesystem, * we need to ensure the counts are correct. Descend down the tree from * this point and update all of the counts to be accurate. */ dsl_dir_init_fs_ss_count(ds->ds_dir, tx); dsl_dataset_rele(ds, FTAG); } /* * Make sure the feature is enabled and activate it if necessary. * Since we're setting a limit, ensure the on-disk counts are valid. * This is only called by the ioctl path when setting a limit value. * * We do not need to validate the new limit, since users who can change the * limit are also allowed to exceed the limit. */ int dsl_dir_activate_fs_ss_limit(const char *ddname) { int error; error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check, dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0, ZFS_SPACE_CHECK_RESERVED); if (error == EALREADY) error = 0; return (error); } /* * Used to determine if the filesystem_limit or snapshot_limit should be * enforced. 
We allow the limit to be exceeded if the user has permission to * write the property value. We pass in the creds that we got in the open * context since we will always be the GZ root in syncing context. We also have * to handle the case where we are allowed to change the limit on the current * dataset, but there may be another limit in the tree above. * * We can never modify these two properties within a non-global zone. In * addition, the other checks are modeled on zfs_secpolicy_write_perms. We * can't use that function since we are already holding the dp_config_rwlock. * In addition, we already have the dd and dealing with snapshots is simplified * in this code. */ typedef enum { ENFORCE_ALWAYS, ENFORCE_NEVER, ENFORCE_ABOVE } enforce_res_t; static enforce_res_t dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr) { enforce_res_t enforce = ENFORCE_ALWAYS; uint64_t obj; dsl_dataset_t *ds; uint64_t zoned; ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT || prop == ZFS_PROP_SNAPSHOT_LIMIT); #ifdef _KERNEL if (crgetzoneid(cr) != GLOBAL_ZONEID) return (ENFORCE_ALWAYS); if (secpolicy_zfs(cr) == 0) return (ENFORCE_NEVER); #endif if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0) return (ENFORCE_ALWAYS); ASSERT(dsl_pool_config_held(dd->dd_pool)); if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0) return (ENFORCE_ALWAYS); if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) { /* Only root can access zoned fs's from the GZ */ enforce = ENFORCE_ALWAYS; } else { if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0) enforce = ENFORCE_ABOVE; } dsl_dataset_rele(ds, FTAG); return (enforce); } static void dsl_dir_update_last_remap_txg_sync(void *varg, dmu_tx_t *tx) { ddulrt_arg_t *arg = varg; uint64_t last_remap_txg; dsl_dir_t *dd = arg->ddulrta_dd; objset_t *mos = dd->dd_pool->dp_meta_objset; dsl_dir_zapify(dd, tx); if (zap_lookup(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG, sizeof (last_remap_txg), 1, &last_remap_txg) != 0 || last_remap_txg < arg->ddlrta_txg) { VERIFY0(zap_update(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG, sizeof (arg->ddlrta_txg), 1, &arg->ddlrta_txg, tx)); } } int dsl_dir_update_last_remap_txg(dsl_dir_t *dd, uint64_t txg) { ddulrt_arg_t arg; arg.ddulrta_dd = dd; arg.ddlrta_txg = txg; return (dsl_sync_task(spa_name(dd->dd_pool->dp_spa), NULL, dsl_dir_update_last_remap_txg_sync, &arg, 1, ZFS_SPACE_CHECK_RESERVED)); } /* * Check if adding additional child filesystem(s) would exceed any filesystem * limits or adding additional snapshot(s) would exceed any snapshot limits. * The prop argument indicates which limit to check. * * Note that all filesystem limits up to the root (or the highest * initialized) filesystem or the given ancestor must be satisfied. */ int dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop, dsl_dir_t *ancestor, cred_t *cr) { objset_t *os = dd->dd_pool->dp_meta_objset; uint64_t limit, count; char *count_prop; enforce_res_t enforce; int err = 0; ASSERT(dsl_pool_config_held(dd->dd_pool)); ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT || prop == ZFS_PROP_SNAPSHOT_LIMIT); /* * If we're allowed to change the limit, don't enforce the limit * e.g. this can happen if a snapshot is taken by an administrative * user in the global zone (i.e. a recursive snapshot by root). * However, we must handle the case of delegated permissions where we * are allowed to change the limit on the current dataset, but there * is another limit in the tree above. 
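 *
 * For illustration (standard zfs(8) usage, not part of this change):
 * after an administrator runs
 *
 *	zfs set filesystem_limit=100 tank/home
 *
 * a user delegated the 'filesystem_limit' permission on tank/home/user
 * may exceed a limit set on tank/home/user itself (ENFORCE_ABOVE), but
 * creations are still checked against the limit on tank/home, where the
 * user holds no such permission; privileged callers in the global zone
 * are never limited (ENFORCE_NEVER).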
*/ enforce = dsl_enforce_ds_ss_limits(dd, prop, cr); if (enforce == ENFORCE_NEVER) return (0); /* * e.g. if renaming a dataset with no snapshots, count adjustment * is 0. */ if (delta == 0) return (0); if (prop == ZFS_PROP_SNAPSHOT_LIMIT) { /* * We don't enforce the limit for temporary snapshots. This is * indicated by a NULL cred_t argument. */ if (cr == NULL) return (0); count_prop = DD_FIELD_SNAPSHOT_COUNT; } else { count_prop = DD_FIELD_FILESYSTEM_COUNT; } /* * If an ancestor has been provided, stop checking the limit once we * hit that dir. We need this during rename so that we don't overcount * the check once we recurse up to the common ancestor. */ if (ancestor == dd) return (0); /* * If we hit an uninitialized node while recursing up the tree, we can * stop since we know there is no limit here (or above). The counts are * not valid on this node and we know we won't touch this node's counts. */ if (!dsl_dir_is_zapified(dd) || zap_lookup(os, dd->dd_object, count_prop, sizeof (count), 1, &count) == ENOENT) return (0); err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL, B_FALSE); if (err != 0) return (err); /* Is there a limit which we've hit? */ if (enforce == ENFORCE_ALWAYS && (count + delta) > limit) return (SET_ERROR(EDQUOT)); if (dd->dd_parent != NULL) err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop, ancestor, cr); return (err); } /* * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all * parents. When a new filesystem/snapshot is created, increment the count on * all parents, and when a filesystem/snapshot is destroyed, decrement the * count. */ void dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop, dmu_tx_t *tx) { int err; objset_t *os = dd->dd_pool->dp_meta_objset; uint64_t count; ASSERT(dsl_pool_config_held(dd->dd_pool)); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 || strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0); /* * When we receive an incremental stream into a filesystem that already * exists, a temporary clone is created. We don't count this temporary * clone, whose name begins with a '%'. We also ignore hidden ($FREE, * $MOS & $ORIGIN) objsets. */ if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') && strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0) return; /* * e.g. if renaming a dataset with no snapshots, count adjustment is 0 */ if (delta == 0) return; /* * If we hit an uninitialized node while recursing up the tree, we can * stop since we know the counts are not valid on this node and we * know we shouldn't touch this node's counts. An uninitialized count * on the node indicates that either the feature has not yet been * activated or there are no limits on this part of the tree. */ if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object, prop, sizeof (count), 1, &count)) == ENOENT) return; VERIFY0(err); count += delta; /* Use a signed verify to make sure we're not neg. 
*/ VERIFY3S(count, >=, 0); VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count, tx)); /* Roll up this additional count into our ancestors */ if (dd->dd_parent != NULL) dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx); } uint64_t dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name, dmu_tx_t *tx) { objset_t *mos = dp->dp_meta_objset; uint64_t ddobj; dsl_dir_phys_t *ddphys; dmu_buf_t *dbuf; ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0, DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx); if (pds) { VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj, name, sizeof (uint64_t), 1, &ddobj, tx)); } else { /* it's the root dir */ VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx)); } VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf)); dmu_buf_will_dirty(dbuf, tx); ddphys = dbuf->db_data; ddphys->dd_creation_time = gethrestime_sec(); if (pds) { ddphys->dd_parent_obj = pds->dd_object; /* update the filesystem counts */ dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx); } ddphys->dd_props_zapobj = zap_create(mos, DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx); ddphys->dd_child_dir_zapobj = zap_create(mos, DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx); if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN) ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN; dmu_buf_rele(dbuf, FTAG); return (ddobj); } boolean_t dsl_dir_is_clone(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_origin_obj && (dd->dd_pool->dp_origin_snap == NULL || dsl_dir_phys(dd)->dd_origin_obj != dd->dd_pool->dp_origin_snap->ds_object)); } uint64_t dsl_dir_get_used(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_used_bytes); } uint64_t dsl_dir_get_compressed(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_compressed_bytes); } uint64_t dsl_dir_get_quota(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_quota); } uint64_t dsl_dir_get_reservation(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_reserved); } uint64_t dsl_dir_get_compressratio(dsl_dir_t *dd) { /* a fixed point number, 100x the ratio */ return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 
100 : (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 / dsl_dir_phys(dd)->dd_compressed_bytes)); } uint64_t dsl_dir_get_logicalused(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_uncompressed_bytes); } uint64_t dsl_dir_get_usedsnap(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]); } uint64_t dsl_dir_get_usedds(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]); } uint64_t dsl_dir_get_usedrefreserv(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]); } uint64_t dsl_dir_get_usedchild(dsl_dir_t *dd) { return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] + dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]); } void dsl_dir_get_origin(dsl_dir_t *dd, char *buf) { dsl_dataset_t *ds; VERIFY0(dsl_dataset_hold_obj(dd->dd_pool, dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds)); dsl_dataset_name(ds, buf); dsl_dataset_rele(ds, FTAG); } int dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count) { if (dsl_dir_is_zapified(dd)) { objset_t *os = dd->dd_pool->dp_meta_objset; return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT, sizeof (*count), 1, count)); } else { return (ENOENT); } } int dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count) { if (dsl_dir_is_zapified(dd)) { objset_t *os = dd->dd_pool->dp_meta_objset; return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT, sizeof (*count), 1, count)); } else { return (ENOENT); } } int dsl_dir_get_remaptxg(dsl_dir_t *dd, uint64_t *count) { if (dsl_dir_is_zapified(dd)) { objset_t *os = dd->dd_pool->dp_meta_objset; return (zap_lookup(os, dd->dd_object, DD_FIELD_LAST_REMAP_TXG, sizeof (*count), 1, count)); } else { return (ENOENT); } } void dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv) { mutex_enter(&dd->dd_lock); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dsl_dir_get_quota(dd)); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION, dsl_dir_get_reservation(dd)); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED, dsl_dir_get_logicalused(dd)); if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) { dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP, dsl_dir_get_usedsnap(dd)); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS, dsl_dir_get_usedds(dd)); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV, dsl_dir_get_usedrefreserv(dd)); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD, dsl_dir_get_usedchild(dd)); } mutex_exit(&dd->dd_lock); uint64_t count; if (dsl_dir_get_filesystem_count(dd, &count) == 0) { dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT, count); } if (dsl_dir_get_snapshot_count(dd, &count) == 0) { dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT, count); } if (dsl_dir_get_remaptxg(dd, &count) == 0) { dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REMAPTXG, count); } if (dsl_dir_is_clone(dd)) { char buf[ZFS_MAX_DATASET_NAME_LEN]; dsl_dir_get_origin(dd, buf); dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf); } } void dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx) { dsl_pool_t *dp = dd->dd_pool; ASSERT(dsl_dir_phys(dd)); if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) { /* up the hold count until we can be written out */ dmu_buf_add_ref(dd->dd_dbuf, dd); } } static int64_t parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta) { uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved); uint64_t new_accounted = MAX(used + delta, dsl_dir_phys(dd)->dd_reserved); return (new_accounted - old_accounted); } void dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx) { ASSERT(dmu_tx_is_syncing(tx)); mutex_enter(&dd->dd_lock); 
ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]); dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg, dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024); dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0; mutex_exit(&dd->dd_lock); /* release the hold from dsl_dir_dirty */ dmu_buf_rele(dd->dd_dbuf, dd); } static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd) { uint64_t space = 0; ASSERT(MUTEX_HELD(&dd->dd_lock)); for (int i = 0; i < TXG_SIZE; i++) { space += dd->dd_space_towrite[i & TXG_MASK]; ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0); } return (space); } /* * How much space would dd have available if ancestor had delta applied * to it? If ondiskonly is set, we're only interested in what's * on-disk, not estimated pending changes. */ uint64_t dsl_dir_space_available(dsl_dir_t *dd, dsl_dir_t *ancestor, int64_t delta, int ondiskonly) { uint64_t parentspace, myspace, quota, used; /* * If there are no restrictions otherwise, assume we have * unlimited space available. */ quota = UINT64_MAX; parentspace = UINT64_MAX; if (dd->dd_parent != NULL) { parentspace = dsl_dir_space_available(dd->dd_parent, ancestor, delta, ondiskonly); } mutex_enter(&dd->dd_lock); if (dsl_dir_phys(dd)->dd_quota != 0) quota = dsl_dir_phys(dd)->dd_quota; used = dsl_dir_phys(dd)->dd_used_bytes; if (!ondiskonly) used += dsl_dir_space_towrite(dd); if (dd->dd_parent == NULL) { uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, ZFS_SPACE_CHECK_NORMAL); quota = MIN(quota, poolsize); } if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) { /* * We have some space reserved, in addition to what our * parent gave us. */ parentspace += dsl_dir_phys(dd)->dd_reserved - used; } if (dd == ancestor) { ASSERT(delta <= 0); ASSERT(used >= -delta); used += delta; if (parentspace != UINT64_MAX) parentspace -= delta; } if (used > quota) { /* over quota */ myspace = 0; } else { /* * the lesser of the space provided by our parent and * the space left in our quota */ myspace = MIN(parentspace, quota - used); } mutex_exit(&dd->dd_lock); return (myspace); } struct tempreserve { list_node_t tr_node; dsl_dir_t *tr_ds; uint64_t tr_size; }; static int dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree, boolean_t ignorequota, list_t *tr_list, dmu_tx_t *tx, boolean_t first) { uint64_t txg; uint64_t quota; struct tempreserve *tr; int retval; uint64_t ref_rsrv; top_of_function: txg = tx->tx_txg; retval = EDQUOT; ref_rsrv = 0; ASSERT3U(txg, !=, 0); ASSERT3S(asize, >, 0); mutex_enter(&dd->dd_lock); /* * Check against the dsl_dir's quota. We don't add in the delta * when checking for over-quota because they get one free hit. */ uint64_t est_inflight = dsl_dir_space_towrite(dd); for (int i = 0; i < TXG_SIZE; i++) est_inflight += dd->dd_tempreserved[i]; uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes; /* * On the first iteration, fetch the dataset's used-on-disk and * refreservation values. Also, if checkrefquota is set, test if * allocating this space would exceed the dataset's refquota. */ if (first && tx->tx_objset) { int error; dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset; error = dsl_dataset_check_quota(ds, !netfree, asize, est_inflight, &used_on_disk, &ref_rsrv); if (error != 0) { mutex_exit(&dd->dd_lock); DMU_TX_STAT_BUMP(dmu_tx_quota); return (error); } } /* * If this transaction will result in a net free of space, * we want to let it through. 
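 *
 * For example (illustrative numbers): with dd_quota at 10G and
 * used_on_disk already at 10G, a truncate or unlink that nets a free of
 * space still proceeds, because netfree makes the quota check below use
 * UINT64_MAX (subject only to the pool-level cap applied at the root
 * dsl_dir).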
*/ if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0) quota = UINT64_MAX; else quota = dsl_dir_phys(dd)->dd_quota; /* * Adjust the quota against the actual pool size at the root * minus any outstanding deferred frees. * To ensure that it's possible to remove files from a full * pool without inducing transient overcommits, we throttle * netfree transactions against a quota that is slightly larger, * but still within the pool's allocation slop. In cases where * we're very close to full, this will allow a steady trickle of * removes to get through. */ uint64_t deferred = 0; if (dd->dd_parent == NULL) { uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool, (netfree) ? ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL); if (avail < quota) { quota = avail; retval = ENOSPC; } } /* * If they are requesting more space, and our current estimate * is over quota, they get to try again unless the actual * on-disk is over quota and there are no pending changes (which * may free up space for us). */ if (used_on_disk + est_inflight >= quota) { if (est_inflight > 0 || used_on_disk < quota || (retval == ENOSPC && used_on_disk < quota + deferred)) retval = ERESTART; dprintf_dd(dd, "failing: used=%lluK inflight = %lluK " "quota=%lluK tr=%lluK err=%d\n", used_on_disk>>10, est_inflight>>10, quota>>10, asize>>10, retval); mutex_exit(&dd->dd_lock); DMU_TX_STAT_BUMP(dmu_tx_quota); return (SET_ERROR(retval)); } /* We need to up our estimated delta before dropping dd_lock */ dd->dd_tempreserved[txg & TXG_MASK] += asize; uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight, asize - ref_rsrv); mutex_exit(&dd->dd_lock); tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP); tr->tr_ds = dd; tr->tr_size = asize; list_insert_tail(tr_list, tr); /* see if it's OK with our parent */ if (dd->dd_parent != NULL && parent_rsrv != 0) { /* * Recurse on our parent without recursion. This has been * observed to be potentially large stack usage even within * the test suite. Largest seen stack was 7632 bytes on linux. */ dd = dd->dd_parent; asize = parent_rsrv; ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0); first = B_FALSE; goto top_of_function; } else { return (0); } } /* * Reserve space in this dsl_dir, to be used in this tx's txg. * After the space has been dirtied (and dsl_dir_willuse_space() * has been called), the reservation should be canceled, using * dsl_dir_tempreserve_clear(). */ int dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize, boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx) { int err; list_t *tr_list; if (asize == 0) { *tr_cookiep = NULL; return (0); } tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP); list_create(tr_list, sizeof (struct tempreserve), offsetof(struct tempreserve, tr_node)); ASSERT3S(asize, >, 0); err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg); if (err == 0) { struct tempreserve *tr; tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP); tr->tr_size = lsize; list_insert_tail(tr_list, tr); } else { if (err == EAGAIN) { /* * If arc_memory_throttle() detected that pageout * is running and we are low on memory, we delay new * non-pageout transactions to give pageout an * advantage. * * It is unfortunate to be delaying while the caller's * locks are held. 
*/ txg_delay(dd->dd_pool, tx->tx_txg, MSEC2NSEC(10), MSEC2NSEC(10)); err = SET_ERROR(ERESTART); } } if (err == 0) { err = dsl_dir_tempreserve_impl(dd, asize, netfree, B_FALSE, tr_list, tx, B_TRUE); } if (err != 0) dsl_dir_tempreserve_clear(tr_list, tx); else *tr_cookiep = tr_list; return (err); } /* * Clear a temporary reservation that we previously made with * dsl_dir_tempreserve_space(). */ void dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx) { int txgidx = tx->tx_txg & TXG_MASK; list_t *tr_list = tr_cookie; struct tempreserve *tr; ASSERT3U(tx->tx_txg, !=, 0); if (tr_cookie == NULL) return; while ((tr = list_head(tr_list)) != NULL) { if (tr->tr_ds) { mutex_enter(&tr->tr_ds->dd_lock); ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=, tr->tr_size); tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size; mutex_exit(&tr->tr_ds->dd_lock); } else { arc_tempreserve_clear(tr->tr_size); } list_remove(tr_list, tr); kmem_free(tr, sizeof (struct tempreserve)); } kmem_free(tr_list, sizeof (list_t)); } /* * This should be called from open context when we think we're going to write * or free space, for example when dirtying data. Be conservative; it's okay * to write less space or free more, but we don't want to write more or free * less than the amount specified. * * NOTE: The behavior of this function is identical to the Illumos / FreeBSD * version however it has been adjusted to use an iterative rather then * recursive algorithm to minimize stack usage. */ void dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx) { int64_t parent_space; uint64_t est_used; do { mutex_enter(&dd->dd_lock); if (space > 0) dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space; est_used = dsl_dir_space_towrite(dd) + dsl_dir_phys(dd)->dd_used_bytes; parent_space = parent_delta(dd, est_used, space); mutex_exit(&dd->dd_lock); /* Make sure that we clean up dd_space_to* */ dsl_dir_dirty(dd, tx); dd = dd->dd_parent; space = parent_space; } while (space && dd); } /* call from syncing context when we actually write/free space for this dd */ void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type, int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx) { int64_t accounted_delta; /* * dsl_dataset_set_refreservation_sync_impl() calls this with * dd_lock held, so that it can atomically update * ds->ds_reserved and the dsl_dir accounting, so that * dsl_dataset_check_quota() can see dataset and dir accounting * consistently. 
*/ boolean_t needlock = !MUTEX_HELD(&dd->dd_lock); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(type < DD_USED_NUM); dmu_buf_will_dirty(dd->dd_dbuf, tx); if (needlock) mutex_enter(&dd->dd_lock); accounted_delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, used); ASSERT(used >= 0 || dsl_dir_phys(dd)->dd_used_bytes >= -used); ASSERT(compressed >= 0 || dsl_dir_phys(dd)->dd_compressed_bytes >= -compressed); ASSERT(uncompressed >= 0 || dsl_dir_phys(dd)->dd_uncompressed_bytes >= -uncompressed); dsl_dir_phys(dd)->dd_used_bytes += used; dsl_dir_phys(dd)->dd_uncompressed_bytes += uncompressed; dsl_dir_phys(dd)->dd_compressed_bytes += compressed; if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) { ASSERT(used > 0 || dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used); dsl_dir_phys(dd)->dd_used_breakdown[type] += used; #ifdef DEBUG { dd_used_t t; uint64_t u = 0; for (t = 0; t < DD_USED_NUM; t++) u += dsl_dir_phys(dd)->dd_used_breakdown[t]; ASSERT3U(u, ==, dsl_dir_phys(dd)->dd_used_bytes); } #endif } if (needlock) mutex_exit(&dd->dd_lock); if (dd->dd_parent != NULL) { dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD, accounted_delta, compressed, uncompressed, tx); dsl_dir_transfer_space(dd->dd_parent, used - accounted_delta, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx); } } void dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta, dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx) { ASSERT(dmu_tx_is_syncing(tx)); ASSERT(oldtype < DD_USED_NUM); ASSERT(newtype < DD_USED_NUM); if (delta == 0 || !(dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN)) return; dmu_buf_will_dirty(dd->dd_dbuf, tx); mutex_enter(&dd->dd_lock); ASSERT(delta > 0 ? dsl_dir_phys(dd)->dd_used_breakdown[oldtype] >= delta : dsl_dir_phys(dd)->dd_used_breakdown[newtype] >= -delta); ASSERT(dsl_dir_phys(dd)->dd_used_bytes >= ABS(delta)); dsl_dir_phys(dd)->dd_used_breakdown[oldtype] -= delta; dsl_dir_phys(dd)->dd_used_breakdown[newtype] += delta; mutex_exit(&dd->dd_lock); } typedef struct dsl_dir_set_qr_arg { const char *ddsqra_name; zprop_source_t ddsqra_source; uint64_t ddsqra_value; } dsl_dir_set_qr_arg_t; static int dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx) { dsl_dir_set_qr_arg_t *ddsqra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; int error; uint64_t towrite, newval; error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds); if (error != 0) return (error); error = dsl_prop_predict(ds->ds_dir, "quota", ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval); if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); } if (newval == 0) { dsl_dataset_rele(ds, FTAG); return (0); } mutex_enter(&ds->ds_dir->dd_lock); /* * If we are doing the preliminary check in open context, and * there are pending changes, then don't fail it, since the * pending changes could under-estimate the amount of space to be * freed up. 
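 *
 * Worked example (illustrative numbers): with dd_used_bytes at 90G, no
 * reservation and towrite at 5G, setting quota=80G fails the
 * syncing-context check below with ENOSPC (80G < 90G + 5G), while
 * quota=100G passes.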
*/ towrite = dsl_dir_space_towrite(ds->ds_dir); if ((dmu_tx_is_syncing(tx) || towrite == 0) && (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved || newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) { error = SET_ERROR(ENOSPC); } mutex_exit(&ds->ds_dir->dd_lock); dsl_dataset_rele(ds, FTAG); return (error); } static void dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx) { dsl_dir_set_qr_arg_t *ddsqra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; uint64_t newval; VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds)); if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) { dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA), ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1, &ddsqra->ddsqra_value, tx); VERIFY0(dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_QUOTA), &newval)); } else { newval = ddsqra->ddsqra_value; spa_history_log_internal_ds(ds, "set", tx, "%s=%lld", zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval); } dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx); mutex_enter(&ds->ds_dir->dd_lock); dsl_dir_phys(ds->ds_dir)->dd_quota = newval; mutex_exit(&ds->ds_dir->dd_lock); dsl_dataset_rele(ds, FTAG); } int dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota) { dsl_dir_set_qr_arg_t ddsqra; ddsqra.ddsqra_name = ddname; ddsqra.ddsqra_source = source; ddsqra.ddsqra_value = quota; return (dsl_sync_task(ddname, dsl_dir_set_quota_check, dsl_dir_set_quota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); } int dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx) { dsl_dir_set_qr_arg_t *ddsqra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; dsl_dir_t *dd; uint64_t newval, used, avail; int error; error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds); if (error != 0) return (error); dd = ds->ds_dir; /* * If we are doing the preliminary check in open context, the * space estimates may be inaccurate. 
*/ if (!dmu_tx_is_syncing(tx)) { dsl_dataset_rele(ds, FTAG); return (0); } error = dsl_prop_predict(ds->ds_dir, zfs_prop_to_name(ZFS_PROP_RESERVATION), ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval); if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); } mutex_enter(&dd->dd_lock); used = dsl_dir_phys(dd)->dd_used_bytes; mutex_exit(&dd->dd_lock); if (dd->dd_parent) { avail = dsl_dir_space_available(dd->dd_parent, NULL, 0, FALSE); } else { avail = dsl_pool_adjustedsize(dd->dd_pool, ZFS_SPACE_CHECK_NORMAL) - used; } if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) { uint64_t delta = MAX(used, newval) - MAX(used, dsl_dir_phys(dd)->dd_reserved); if (delta > avail || (dsl_dir_phys(dd)->dd_quota > 0 && newval > dsl_dir_phys(dd)->dd_quota)) error = SET_ERROR(ENOSPC); } dsl_dataset_rele(ds, FTAG); return (error); } void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx) { uint64_t used; int64_t delta; dmu_buf_will_dirty(dd->dd_dbuf, tx); mutex_enter(&dd->dd_lock); used = dsl_dir_phys(dd)->dd_used_bytes; delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved); dsl_dir_phys(dd)->dd_reserved = value; if (dd->dd_parent != NULL) { /* Roll up this additional usage into our ancestors */ dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV, delta, 0, 0, tx); } mutex_exit(&dd->dd_lock); } static void dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx) { dsl_dir_set_qr_arg_t *ddsqra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dataset_t *ds; uint64_t newval; VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds)); if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) { dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_RESERVATION), ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1, &ddsqra->ddsqra_value, tx); VERIFY0(dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval)); } else { newval = ddsqra->ddsqra_value; spa_history_log_internal_ds(ds, "set", tx, "%s=%lld", zfs_prop_to_name(ZFS_PROP_RESERVATION), (longlong_t)newval); } dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx); dsl_dataset_rele(ds, FTAG); } int dsl_dir_set_reservation(const char *ddname, zprop_source_t source, uint64_t reservation) { dsl_dir_set_qr_arg_t ddsqra; ddsqra.ddsqra_name = ddname; ddsqra.ddsqra_source = source; ddsqra.ddsqra_value = reservation; return (dsl_sync_task(ddname, dsl_dir_set_reservation_check, dsl_dir_set_reservation_sync, &ddsqra, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); } static dsl_dir_t * closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2) { for (; ds1; ds1 = ds1->dd_parent) { dsl_dir_t *dd; for (dd = ds2; dd; dd = dd->dd_parent) { if (ds1 == dd) return (dd); } } return (NULL); } /* * If delta is applied to dd, how much of that delta would be applied to * ancestor? Syncing context only. 
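 *
 * Worked example (illustrative numbers): parent_delta() computes
 * MAX(used + delta, reserved) - MAX(used, reserved), so for a dd with
 * dd_used_bytes = 8G and dd_reserved = 10G a delta of +1G yields
 * MAX(9G, 10G) - MAX(8G, 10G) = 0: the change is absorbed by the
 * reservation and none of it is charged to the ancestor.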
*/ static int64_t would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor) { if (dd == ancestor) return (delta); mutex_enter(&dd->dd_lock); delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta); mutex_exit(&dd->dd_lock); return (would_change(dd->dd_parent, delta, ancestor)); } typedef struct dsl_dir_rename_arg { const char *ddra_oldname; const char *ddra_newname; cred_t *ddra_cred; } dsl_dir_rename_arg_t; typedef struct dsl_valid_rename_arg { int char_delta; int nest_delta; } dsl_valid_rename_arg_t; /* ARGSUSED */ static int dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) { dsl_valid_rename_arg_t *dvra = arg; char namebuf[ZFS_MAX_DATASET_NAME_LEN]; dsl_dataset_name(ds, namebuf); ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); int namelen = strlen(namebuf) + dvra->char_delta; int depth = get_dataset_depth(namebuf) + dvra->nest_delta; if (namelen >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting) return (SET_ERROR(ENAMETOOLONG)); return (0); } static int dsl_dir_rename_check(void *arg, dmu_tx_t *tx) { dsl_dir_rename_arg_t *ddra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *dd, *newparent; dsl_valid_rename_arg_t dvra; + dsl_dataset_t *parentds; + objset_t *parentos; const char *mynewname; int error; /* target dir should exist */ error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL); if (error != 0) return (error); /* new parent should exist */ error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent, &mynewname); if (error != 0) { dsl_dir_rele(dd, FTAG); return (error); } /* can't rename to different pool */ if (dd->dd_pool != newparent->dd_pool) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (SET_ERROR(EXDEV)); } /* new name should not already exist */ if (mynewname == NULL) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (SET_ERROR(EEXIST)); } + /* can't rename below anything but filesystems (eg. 
no ZVOLs) */ + error = dsl_dataset_hold_obj(newparent->dd_pool, + dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds); + if (error != 0) { + dsl_dir_rele(newparent, FTAG); + dsl_dir_rele(dd, FTAG); + return (error); + } + error = dmu_objset_from_ds(parentds, &parentos); + if (error != 0) { + dsl_dataset_rele(parentds, FTAG); + dsl_dir_rele(newparent, FTAG); + dsl_dir_rele(dd, FTAG); + return (error); + } + if (dmu_objset_type(parentos) != DMU_OST_ZFS) { + dsl_dataset_rele(parentds, FTAG); + dsl_dir_rele(newparent, FTAG); + dsl_dir_rele(dd, FTAG); + return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); + } + dsl_dataset_rele(parentds, FTAG); + ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN), <, ZFS_MAX_DATASET_NAME_LEN); dvra.char_delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname); dvra.nest_delta = get_dataset_depth(ddra->ddra_newname) - get_dataset_depth(ddra->ddra_oldname); /* if the name length is growing, validate child name lengths */ if (dvra.char_delta > 0 || dvra.nest_delta > 0) { error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename, &dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); if (error != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (error); } } if (dmu_tx_is_syncing(tx)) { if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) { /* * Although this is the check function and we don't * normally make on-disk changes in check functions, * we need to do that here. * * Ensure this portion of the tree's counts have been * initialized in case the new parent has limits set. */ dsl_dir_init_fs_ss_count(dd, tx); } } if (newparent != dd->dd_parent) { /* is there enough space? */ uint64_t myspace = MAX(dsl_dir_phys(dd)->dd_used_bytes, dsl_dir_phys(dd)->dd_reserved); objset_t *os = dd->dd_pool->dp_meta_objset; uint64_t fs_cnt = 0; uint64_t ss_cnt = 0; if (dsl_dir_is_zapified(dd)) { int err; err = zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1, &fs_cnt); if (err != ENOENT && err != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (err); } /* * have to add 1 for the filesystem itself that we're * moving */ fs_cnt++; err = zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1, &ss_cnt); if (err != ENOENT && err != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (err); } } /* check for encryption errors */ error = dsl_dir_rename_crypt_check(dd, newparent); if (error != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (SET_ERROR(EACCES)); } /* no rename into our descendant */ if (closest_common_ancestor(dd, newparent) == dd) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (SET_ERROR(EINVAL)); } error = dsl_dir_transfer_possible(dd->dd_parent, newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred); if (error != 0) { dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (error); } } dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); return (0); } static void dsl_dir_rename_sync(void *arg, dmu_tx_t *tx) { dsl_dir_rename_arg_t *ddra = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *dd, *newparent; const char *mynewname; int error; objset_t *mos = dp->dp_meta_objset; VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL)); VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent, &mynewname)); /* Log this before we change the name. 
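 * (As an aside on the DMU_OST_ZFS check added in dsl_dir_rename_check()
 * above: given an existing zvol such as tank/vol, a request like
 * "zfs rename tank/fs tank/vol/fs" is now rejected with
 * ZFS_ERR_WRONG_PARENT, since only filesystems can parent other
 * datasets.)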
*/ spa_history_log_internal_dd(dd, "rename", tx, "-> %s", ddra->ddra_newname); if (newparent != dd->dd_parent) { objset_t *os = dd->dd_pool->dp_meta_objset; uint64_t fs_cnt = 0; uint64_t ss_cnt = 0; /* * We already made sure the dd counts were initialized in the * check function. */ if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) { VERIFY0(zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1, &fs_cnt)); /* add 1 for the filesystem itself that we're moving */ fs_cnt++; VERIFY0(zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1, &ss_cnt)); } dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt, DD_FIELD_FILESYSTEM_COUNT, tx); dsl_fs_ss_count_adjust(newparent, fs_cnt, DD_FIELD_FILESYSTEM_COUNT, tx); dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt, DD_FIELD_SNAPSHOT_COUNT, tx); dsl_fs_ss_count_adjust(newparent, ss_cnt, DD_FIELD_SNAPSHOT_COUNT, tx); dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD, -dsl_dir_phys(dd)->dd_used_bytes, -dsl_dir_phys(dd)->dd_compressed_bytes, -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx); dsl_dir_diduse_space(newparent, DD_USED_CHILD, dsl_dir_phys(dd)->dd_used_bytes, dsl_dir_phys(dd)->dd_compressed_bytes, dsl_dir_phys(dd)->dd_uncompressed_bytes, tx); if (dsl_dir_phys(dd)->dd_reserved > dsl_dir_phys(dd)->dd_used_bytes) { uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved - dsl_dir_phys(dd)->dd_used_bytes; dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV, -unused_rsrv, 0, 0, tx); dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV, unused_rsrv, 0, 0, tx); } } dmu_buf_will_dirty(dd->dd_dbuf, tx); /* remove from old parent zapobj */ error = zap_remove(mos, dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj, dd->dd_myname, tx); ASSERT0(error); (void) strlcpy(dd->dd_myname, mynewname, sizeof (dd->dd_myname)); dsl_dir_rele(dd->dd_parent, dd); dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object; VERIFY0(dsl_dir_hold_obj(dp, newparent->dd_object, NULL, dd, &dd->dd_parent)); /* add to new parent zapobj */ VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj, dd->dd_myname, 8, 1, &dd->dd_object, tx)); zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname, ddra->ddra_newname, B_TRUE); dsl_prop_notify_all(dd); dsl_dir_rele(newparent, FTAG); dsl_dir_rele(dd, FTAG); } int dsl_dir_rename(const char *oldname, const char *newname) { dsl_dir_rename_arg_t ddra; ddra.ddra_oldname = oldname; ddra.ddra_newname = newname; ddra.ddra_cred = CRED(); return (dsl_sync_task(oldname, dsl_dir_rename_check, dsl_dir_rename_sync, &ddra, 3, ZFS_SPACE_CHECK_RESERVED)); } int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *cr) { dsl_dir_t *ancestor; int64_t adelta; uint64_t avail; int err; ancestor = closest_common_ancestor(sdd, tdd); adelta = would_change(sdd, -space, ancestor); avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE); if (avail < space) return (SET_ERROR(ENOSPC)); err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT, ancestor, cr); if (err != 0) return (err); err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT, ancestor, cr); if (err != 0) return (err); return (0); } inode_timespec_t dsl_dir_snap_cmtime(dsl_dir_t *dd) { inode_timespec_t t; mutex_enter(&dd->dd_lock); t = dd->dd_snap_cmtime; mutex_exit(&dd->dd_lock); return (t); } void dsl_dir_snap_cmtime_update(dsl_dir_t *dd) { inode_timespec_t t; gethrestime(&t); mutex_enter(&dd->dd_lock); dd->dd_snap_cmtime = t; mutex_exit(&dd->dd_lock); } void 
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx) { objset_t *mos = dd->dd_pool->dp_meta_objset; dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx); } boolean_t dsl_dir_is_zapified(dsl_dir_t *dd) { dmu_object_info_t doi; dmu_object_info_from_db(dd->dd_dbuf, &doi); return (doi.doi_type == DMU_OTN_ZAP_METADATA); } #if defined(_KERNEL) EXPORT_SYMBOL(dsl_dir_set_quota); EXPORT_SYMBOL(dsl_dir_set_reservation); #endif diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 0dfa016845a3..f4aea57d445b 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -1,7319 +1,7316 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Portions Copyright 2011 Martin Matuska * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved. * Portions Copyright 2012 Pawel Jakub Dawidek * Copyright (c) 2014, 2016 Joyent, Inc. All rights reserved. * Copyright 2016 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2014, Joyent, Inc. All rights reserved. * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. * Copyright (c) 2013 Steven Hartland. All rights reserved. * Copyright (c) 2014 Integros [integros.com] * Copyright 2016 Toomas Soome * Copyright (c) 2016 Actifio, Inc. All rights reserved. - * Copyright (c) 2017, loli10K . All rights reserved. + * Copyright (c) 2018, loli10K . All rights reserved. * Copyright (c) 2017 Datto Inc. All rights reserved. * Copyright 2017 RackTop Systems. * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. */ /* * ZFS ioctls. * * This file handles the ioctls to /dev/zfs, used for configuring ZFS storage * pools and filesystems, e.g. with /sbin/zfs and /sbin/zpool. * * There are two ways that we handle ioctls: the legacy way where almost * all of the logic is in the ioctl callback, and the new way where most * of the marshalling is handled in the common entry point, zfsdev_ioctl(). * * Non-legacy ioctls should be registered by calling * zfs_ioctl_register() from zfs_ioctl_init(). The ioctl is invoked * from userland by lzc_ioctl(). * * The registration arguments are as follows: * * const char *name * The name of the ioctl. This is used for history logging. If the * ioctl returns successfully (the callback returns 0), and allow_log * is true, then a history log entry will be recorded with the input & * output nvlists. The log entry can be printed with "zpool history -i". * * zfs_ioc_t ioc * The ioctl request number, which userland will pass to ioctl(2). * We want newer versions of libzfs and libzfs_core to run against * existing zfs kernel modules (i.e. a deferred reboot after an update). * Therefore the ioctl numbers cannot change from release to release. 
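 *
 * (A hypothetical sketch of that convention; the last entry below is
 * invented for illustration and is not an actual ioctl:)
 *
 *	typedef enum zfs_ioc {
 *		ZFS_IOC_FIRST = ('Z' << 8),
 *		...
 *		ZFS_IOC_LAST_EXISTING,
 *		ZFS_IOC_SHINY_NEW_FEATURE,	<- appended, never reordered
 *	} zfs_ioc_t;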
* * zfs_secpolicy_func_t *secpolicy * This function will be called before the zfs_ioc_func_t, to * determine if this operation is permitted. It should return EPERM * on failure, and 0 on success. Checks include determining if the * dataset is visible in this zone, and if the user has either all * zfs privileges in the zone (SYS_MOUNT), or has been granted permission * to do this operation on this dataset with "zfs allow". * * zfs_ioc_namecheck_t namecheck * This specifies what to expect in the zfs_cmd_t:zc_name -- a pool * name, a dataset name, or nothing. If the name is not well-formed, * the ioctl will fail and the callback will not be called. * Therefore, the callback can assume that the name is well-formed * (e.g. is null-terminated, doesn't have more than one '@' character, * doesn't have invalid characters). * * zfs_ioc_poolcheck_t pool_check * This specifies requirements on the pool state. If the pool does * not meet them (is suspended or is readonly), the ioctl will fail * and the callback will not be called. If any checks are specified * (i.e. it is not POOL_CHECK_NONE), namecheck must not be NO_NAME. * Multiple checks can be or-ed together (e.g. POOL_CHECK_SUSPENDED | * POOL_CHECK_READONLY). * * zfs_ioc_key_t *nvl_keys * The list of expected/allowable innvl input keys. This list is used * to validate the nvlist input to the ioctl. * * boolean_t smush_outnvlist * If smush_outnvlist is true, then the output is presumed to be a * list of errors, and it will be "smushed" down to fit into the * caller's buffer, by removing some entries and replacing them with a * single "N_MORE_ERRORS" entry indicating how many were removed. See * nvlist_smush() for details. If smush_outnvlist is false, and the * outnvlist does not fit into the userland-provided buffer, then the * ioctl will fail with ENOMEM. * * zfs_ioc_func_t *func * The callback function that will perform the operation. * * The callback should return 0 on success, or an error number on * failure. If the function fails, the userland ioctl will return -1, * and errno will be set to the callback's return value. The callback * will be called with the following arguments: * * const char *name * The name of the pool or dataset to operate on, from * zfs_cmd_t:zc_name. The 'namecheck' argument specifies the * expected type (pool, dataset, or none). * * nvlist_t *innvl * The input nvlist, deserialized from zfs_cmd_t:zc_nvlist_src. Or * NULL if no input nvlist was provided. Changes to this nvlist are * ignored. If the input nvlist could not be deserialized, the * ioctl will fail and the callback will not be called. * * nvlist_t *outnvl * The output nvlist, initially empty. The callback can fill it in, * and it will be returned to userland by serializing it into * zfs_cmd_t:zc_nvlist_dst. If it is non-empty, and serialization * fails (e.g. because the caller didn't supply a large enough * buffer), then the overall ioctl will fail. See the * 'smush_nvlist' argument above for additional behaviors. * * There are two typical uses of the output nvlist: * - To return state, e.g. property values. In this case, * smush_outnvlist should be false. If the buffer was not large * enough, the caller will reallocate a larger buffer and try * the ioctl again. * * - To return multiple errors from an ioctl which makes on-disk * changes. In this case, smush_outnvlist should be true. 
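 *
 *   (A hypothetical example of the second use: a callback reporting
 *   per-snapshot failures might fill the outnvl as
 *
 *	fnvlist_add_int32(outnvl, "pool/fs@snap1", EBUSY);
 *	fnvlist_add_int32(outnvl, "pool/fs@snap2", EEXIST);
 *
 *   and rely on the smushing described above to fit the caller's
 *   buffer.)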
* Ioctls which make on-disk modifications should generally not * use the outnvl if they succeed, because the caller can not * distinguish between the operation failing, and * deserialization failing. * * IOCTL Interface Errors * * The following ioctl input errors can be returned: * ZFS_ERR_IOC_CMD_UNAVAIL the ioctl number is not supported by kernel * ZFS_ERR_IOC_ARG_UNAVAIL an input argument is not supported by kernel * ZFS_ERR_IOC_ARG_REQUIRED a required input argument is missing * ZFS_ERR_IOC_ARG_BADTYPE an input argument has an invalid type */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" #include "zfs_prop.h" #include "zfs_deleg.h" #include "zfs_comutil.h" #include #include /* * Limit maximum nvlist size. We don't want users passing in insane values * for zc->zc_nvlist_src_size, since we will need to allocate that much memory. */ #define MAX_NVLIST_SRC_SIZE KMALLOC_MAX_SIZE kmutex_t zfsdev_state_lock; zfsdev_state_t *zfsdev_state_list; extern void zfs_init(void); extern void zfs_fini(void); uint_t zfs_fsyncer_key; extern uint_t rrw_tsd_key; static uint_t zfs_allow_log_key; typedef int zfs_ioc_legacy_func_t(zfs_cmd_t *); typedef int zfs_ioc_func_t(const char *, nvlist_t *, nvlist_t *); typedef int zfs_secpolicy_func_t(zfs_cmd_t *, nvlist_t *, cred_t *); /* * IOC Keys are used to document and validate user->kernel interface inputs. * See zfs_keys_recv_new for an example declaration. Any key name that is not * listed will be rejected as input. * * The keyname 'optional' is always allowed, and must be an nvlist if present. * Arguments which older kernels can safely ignore can be placed under the * "optional" key. * * When adding new keys to an existing ioc for new functionality, consider: * - adding an entry into zfs_sysfs.c zfs_features[] list * - updating the libzfs_input_check.c test utility * * Note: in the ZK_WILDCARDLIST case, the name serves as documentation * for the expected name (bookmark, snapshot, property, etc) but there * is no validation in the preflight zfs_check_input_nvpairs() check. */ typedef enum { ZK_OPTIONAL = 1 << 0, /* pair is optional */ ZK_WILDCARDLIST = 1 << 1, /* one or more unspecified key names */ } ioc_key_flag_t; /* DATA_TYPE_ANY is used when zkey_type can vary. 
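 *
 * For example, a hypothetical ioctl taking one required nvlist, an
 * optional nvlist, and an open-ended set of snapshot names might
 * declare:
 *
 *	static const zfs_ioc_key_t zfs_keys_hypothetical[] = {
 *		{"snaps",	DATA_TYPE_NVLIST,	0},
 *		{"props",	DATA_TYPE_NVLIST,	ZK_OPTIONAL},
 *		{"snapshot",	DATA_TYPE_ANY,		ZK_WILDCARDLIST},
 *	};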
*/ #define DATA_TYPE_ANY DATA_TYPE_UNKNOWN typedef struct zfs_ioc_key { const char *zkey_name; data_type_t zkey_type; ioc_key_flag_t zkey_flags; } zfs_ioc_key_t; typedef enum { NO_NAME, POOL_NAME, DATASET_NAME } zfs_ioc_namecheck_t; typedef enum { POOL_CHECK_NONE = 1 << 0, POOL_CHECK_SUSPENDED = 1 << 1, POOL_CHECK_READONLY = 1 << 2, } zfs_ioc_poolcheck_t; typedef struct zfs_ioc_vec { zfs_ioc_legacy_func_t *zvec_legacy_func; zfs_ioc_func_t *zvec_func; zfs_secpolicy_func_t *zvec_secpolicy; zfs_ioc_namecheck_t zvec_namecheck; boolean_t zvec_allow_log; zfs_ioc_poolcheck_t zvec_pool_check; boolean_t zvec_smush_outnvlist; const char *zvec_name; const zfs_ioc_key_t *zvec_nvl_keys; size_t zvec_nvl_key_count; } zfs_ioc_vec_t; /* This array is indexed by zfs_userquota_prop_t */ static const char *userquota_perms[] = { ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_PERM_PROJECTOBJQUOTA, }; static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc); static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc); static int zfs_check_settable(const char *name, nvpair_t *property, cred_t *cr); static int zfs_check_clearable(char *dataset, nvlist_t *props, nvlist_t **errors); static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *, boolean_t *); int zfs_set_prop_nvlist(const char *, zprop_source_t, nvlist_t *, nvlist_t *); static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp); static void history_str_free(char *buf) { kmem_free(buf, HIS_MAX_RECORD_LEN); } static char * history_str_get(zfs_cmd_t *zc) { char *buf; if (zc->zc_history == 0) return (NULL); buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP); if (copyinstr((void *)(uintptr_t)zc->zc_history, buf, HIS_MAX_RECORD_LEN, NULL) != 0) { history_str_free(buf); return (NULL); } buf[HIS_MAX_RECORD_LEN -1] = '\0'; return (buf); } /* * Check to see if the named dataset is currently defined as bootable */ static boolean_t zfs_is_bootfs(const char *name) { objset_t *os; if (dmu_objset_hold(name, FTAG, &os) == 0) { boolean_t ret; ret = (dmu_objset_id(os) == spa_bootfs(dmu_objset_spa(os))); dmu_objset_rele(os, FTAG); return (ret); } return (B_FALSE); } /* * Return non-zero if the spa version is less than requested version. */ static int zfs_earlier_version(const char *name, int version) { spa_t *spa; if (spa_open(name, &spa, FTAG) == 0) { if (spa_version(spa) < version) { spa_close(spa, FTAG); return (1); } spa_close(spa, FTAG); } return (0); } /* * Return TRUE if the ZPL version is less than requested version. 
*/ static boolean_t zpl_earlier_version(const char *name, int version) { objset_t *os; boolean_t rc = B_TRUE; if (dmu_objset_hold(name, FTAG, &os) == 0) { uint64_t zplversion; if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele(os, FTAG); return (B_TRUE); } /* XXX reading from non-owned objset */ if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0) rc = zplversion < version; dmu_objset_rele(os, FTAG); } return (rc); } static void zfs_log_history(zfs_cmd_t *zc) { spa_t *spa; char *buf; if ((buf = history_str_get(zc)) == NULL) return; if (spa_open(zc->zc_name, &spa, FTAG) == 0) { if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY) (void) spa_history_log(spa, buf); spa_close(spa, FTAG); } history_str_free(buf); } /* * Policy for top-level read operations (list pools). Requires no privileges, * and can be used in the local zone, as there is no associated dataset. */ /* ARGSUSED */ static int zfs_secpolicy_none(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (0); } /* * Policy for dataset read operations (list children, get statistics). Requires * no privileges, but must be visible in the local zone. */ /* ARGSUSED */ static int zfs_secpolicy_read(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { if (INGLOBALZONE(curproc) || zone_dataset_visible(zc->zc_name, NULL)) return (0); return (SET_ERROR(ENOENT)); } static int zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr) { int writable = 1; /* * The dataset must be visible by this zone -- check this first * so they don't see EPERM on something they shouldn't know about. */ if (!INGLOBALZONE(curproc) && !zone_dataset_visible(dataset, &writable)) return (SET_ERROR(ENOENT)); if (INGLOBALZONE(curproc)) { /* * If the fs is zoned, only root can access it from the * global zone. */ if (secpolicy_zfs(cr) && zoned) return (SET_ERROR(EPERM)); } else { /* * If we are in a local zone, the 'zoned' property must be set. */ if (!zoned) return (SET_ERROR(EPERM)); /* must be writable by this zone */ if (!writable) return (SET_ERROR(EPERM)); } return (0); } static int zfs_dozonecheck(const char *dataset, cred_t *cr) { uint64_t zoned; if (dsl_prop_get_integer(dataset, "zoned", &zoned, NULL)) return (SET_ERROR(ENOENT)); return (zfs_dozonecheck_impl(dataset, zoned, cr)); } static int zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr) { uint64_t zoned; if (dsl_prop_get_int_ds(ds, "zoned", &zoned)) return (SET_ERROR(ENOENT)); return (zfs_dozonecheck_impl(dataset, zoned, cr)); } static int zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds, const char *perm, cred_t *cr) { int error; error = zfs_dozonecheck_ds(name, ds, cr); if (error == 0) { error = secpolicy_zfs(cr); if (error != 0) error = dsl_deleg_access_impl(ds, perm, cr); } return (error); } static int zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr) { int error; dsl_dataset_t *ds; dsl_pool_t *dp; /* * First do a quick check for root in the global zone, which * is allowed to do all write_perms. This ensures that zfs_ioc_* * will get to handle nonexistent datasets. */ if (INGLOBALZONE(curproc) && secpolicy_zfs(cr) == 0) return (0); error = dsl_pool_hold(name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, name, FTAG, &ds); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } error = zfs_secpolicy_write_perms_ds(name, ds, perm, cr); dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); } /* * Policy for setting the security label property. 
 * * Returns 0 for success, non-zero for access and other errors. */ static int zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr) { #ifdef HAVE_MLSLABEL char ds_hexsl[MAXNAMELEN]; bslabel_t ds_sl, new_sl; boolean_t new_default = FALSE; uint64_t zoned; int needed_priv = -1; int error; /* First get the existing dataset label. */ error = dsl_prop_get(name, zfs_prop_to_name(ZFS_PROP_MLSLABEL), 1, sizeof (ds_hexsl), &ds_hexsl, NULL); if (error != 0) return (SET_ERROR(EPERM)); if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0) new_default = TRUE; /* The label must be translatable */ if (!new_default && (hexstr_to_label(strval, &new_sl) != 0)) return (SET_ERROR(EINVAL)); /* * In a non-global zone, disallow attempts to set a label that * doesn't match that of the zone; otherwise no other checks * are needed. */ if (!INGLOBALZONE(curproc)) { if (new_default || !blequal(&new_sl, CR_SL(CRED()))) return (SET_ERROR(EPERM)); return (0); } /* * For global-zone datasets (i.e., those whose zoned property is * "off"), verify that the specified new label is valid for the * global zone. */ if (dsl_prop_get_integer(name, zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL)) return (SET_ERROR(EPERM)); if (!zoned) { if (zfs_check_global_label(name, strval) != 0) return (SET_ERROR(EPERM)); } /* * If the existing dataset label is nondefault, check if the * dataset is mounted (label cannot be changed while mounted). * Get the zfsvfs_t; if there isn't one, then the dataset isn't * mounted (or isn't a dataset, doesn't exist, ...). */ if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) { objset_t *os; static char *setsl_tag = "setsl_tag"; /* * Try to own the dataset; abort if there is any error * (e.g., already mounted, in use, or other error). */ error = dmu_objset_own(name, DMU_OST_ZFS, B_TRUE, B_TRUE, setsl_tag, &os); if (error != 0) return (SET_ERROR(EPERM)); dmu_objset_disown(os, B_TRUE, setsl_tag); if (new_default) { needed_priv = PRIV_FILE_DOWNGRADE_SL; goto out_check; } if (hexstr_to_label(strval, &new_sl) != 0) return (SET_ERROR(EPERM)); if (blstrictdom(&ds_sl, &new_sl)) needed_priv = PRIV_FILE_DOWNGRADE_SL; else if (blstrictdom(&new_sl, &ds_sl)) needed_priv = PRIV_FILE_UPGRADE_SL; } else { /* dataset currently has a default label */ if (!new_default) needed_priv = PRIV_FILE_UPGRADE_SL; } out_check: if (needed_priv != -1) return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL)); return (0); #else return (SET_ERROR(ENOTSUP)); #endif /* HAVE_MLSLABEL */ } static int zfs_secpolicy_setprop(const char *dsname, zfs_prop_t prop, nvpair_t *propval, cred_t *cr) { char *strval; /* * Check permissions for special properties. */ switch (prop) { default: break; case ZFS_PROP_ZONED: /* * Disallow setting of 'zoned' from within a local zone. */ if (!INGLOBALZONE(curproc)) return (SET_ERROR(EPERM)); break; case ZFS_PROP_QUOTA: case ZFS_PROP_FILESYSTEM_LIMIT: case ZFS_PROP_SNAPSHOT_LIMIT: if (!INGLOBALZONE(curproc)) { uint64_t zoned; char setpoint[ZFS_MAX_DATASET_NAME_LEN]; /* * Unprivileged users are allowed to modify the * limit on things *under* (i.e. contained by) * the thing they own.
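 *
 * Concretely (an illustrative case): with "zoned" set at setpoint
 * "pool/zoned", such a user may set a filesystem_limit on
 * "pool/zoned/child", which is strictly below the setpoint, but not
 * on "pool/zoned" itself; the length comparison below rejects the
 * latter.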
*/ if (dsl_prop_get_integer(dsname, "zoned", &zoned, setpoint)) return (SET_ERROR(EPERM)); if (!zoned || strlen(dsname) <= strlen(setpoint)) return (SET_ERROR(EPERM)); } break; case ZFS_PROP_MLSLABEL: if (!is_system_labeled()) return (SET_ERROR(EPERM)); if (nvpair_value_string(propval, &strval) == 0) { int err; err = zfs_set_slabel_policy(dsname, strval, CRED()); if (err != 0) return (err); } break; } return (zfs_secpolicy_write_perms(dsname, zfs_prop_to_name(prop), cr)); } /* ARGSUSED */ static int zfs_secpolicy_set_fsacl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int error; error = zfs_dozonecheck(zc->zc_name, cr); if (error != 0) return (error); /* * permission to set permissions will be evaluated later in * dsl_deleg_can_allow() */ return (0); } /* ARGSUSED */ static int zfs_secpolicy_rollback(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_ROLLBACK, cr)); } /* ARGSUSED */ static int zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { dsl_pool_t *dp; dsl_dataset_t *ds; char *cp; int error; /* * Generate the current snapshot name from the given objsetid, then * use that name for the secpolicy/zone checks. */ cp = strchr(zc->zc_name, '@'); if (cp == NULL) return (SET_ERROR(EINVAL)); error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } dsl_dataset_name(ds, zc->zc_name); error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds, ZFS_DELEG_PERM_SEND, cr); dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); } /* ARGSUSED */ static int zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_SEND, cr)); } int zfs_secpolicy_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (SET_ERROR(ENOTSUP)); } int zfs_secpolicy_smb_acl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (SET_ERROR(ENOTSUP)); } static int zfs_get_parent(const char *datasetname, char *parent, int parentsize) { char *cp; /* * Remove the @bla or /bla from the end of the name to get the parent. */ (void) strncpy(parent, datasetname, parentsize); cp = strrchr(parent, '@'); if (cp != NULL) { cp[0] = '\0'; } else { cp = strrchr(parent, '/'); if (cp == NULL) return (SET_ERROR(ENOENT)); cp[0] = '\0'; } return (0); } int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr) { int error; if ((error = zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr)); } /* ARGSUSED */ static int zfs_secpolicy_destroy(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_destroy_perms(zc->zc_name, cr)); } /* * Destroying snapshots with delegated permissions requires * descendant mount and destroy permissions. */ /* ARGSUSED */ static int zfs_secpolicy_destroy_snaps(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { nvlist_t *snaps; nvpair_t *pair, *nextpair; int error = 0; snaps = fnvlist_lookup_nvlist(innvl, "snaps"); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nextpair) { nextpair = nvlist_next_nvpair(snaps, pair); error = zfs_secpolicy_destroy_perms(nvpair_name(pair), cr); if (error == ENOENT) { /* * Ignore any snapshots that don't exist (we consider * them "already destroyed"). 
Remove the name from the * nvl here in case the snapshot is created between * now and when we try to destroy it (in which case * we don't want to destroy it since we haven't * checked for permission). */ fnvlist_remove_nvpair(snaps, pair); error = 0; } if (error != 0) break; } return (error); } int zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr) { char parentname[ZFS_MAX_DATASET_NAME_LEN]; int error; if ((error = zfs_secpolicy_write_perms(from, ZFS_DELEG_PERM_RENAME, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(from, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); if ((error = zfs_get_parent(to, parentname, sizeof (parentname))) != 0) return (error); if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_CREATE, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (error); } /* ARGSUSED */ static int zfs_secpolicy_rename(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr)); } /* ARGSUSED */ static int zfs_secpolicy_promote(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { dsl_pool_t *dp; dsl_dataset_t *clone; int error; error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_PROMOTE, cr); if (error != 0) return (error); error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &clone); if (error == 0) { char parentname[ZFS_MAX_DATASET_NAME_LEN]; dsl_dataset_t *origin = NULL; dsl_dir_t *dd; dd = clone->ds_dir; error = dsl_dataset_hold_obj(dd->dd_pool, dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin); if (error != 0) { dsl_dataset_rele(clone, FTAG); dsl_pool_rele(dp, FTAG); return (error); } error = zfs_secpolicy_write_perms_ds(zc->zc_name, clone, ZFS_DELEG_PERM_MOUNT, cr); dsl_dataset_name(origin, parentname); if (error == 0) { error = zfs_secpolicy_write_perms_ds(parentname, origin, ZFS_DELEG_PERM_PROMOTE, cr); } dsl_dataset_rele(clone, FTAG); dsl_dataset_rele(origin, FTAG); } dsl_pool_rele(dp, FTAG); return (error); } /* ARGSUSED */ static int zfs_secpolicy_recv(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int error; if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_RECEIVE, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_CREATE, cr)); } /* ARGSUSED */ static int zfs_secpolicy_recv_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_recv(zc, innvl, cr)); } int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr) { return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_SNAPSHOT, cr)); } /* * Check for permission to create each snapshot in the nvlist. */ /* ARGSUSED */ static int zfs_secpolicy_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { nvlist_t *snaps; int error = 0; nvpair_t *pair; snaps = fnvlist_lookup_nvlist(innvl, "snaps"); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) { char *name = nvpair_name(pair); char *atp = strchr(name, '@'); if (atp == NULL) { error = SET_ERROR(EINVAL); break; } *atp = '\0'; error = zfs_secpolicy_snapshot_perms(name, cr); *atp = '@'; if (error != 0) break; } return (error); } /* * Check for permission to create each bookmark in the nvlist. 
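 *
 * The filesystem component is isolated by temporarily overwriting the
 * '#' separator with a NUL, e.g. for "pool/fs#mark":
 *
 *	*hashp = '\0';		<- perms checked against "pool/fs"
 *	error = zfs_secpolicy_write_perms(name,
 *	    ZFS_DELEG_PERM_BOOKMARK, cr);
 *	*hashp = '#';		<- restored to "pool/fs#mark"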
*/ /* ARGSUSED */ static int zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int error = 0; for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { char *name = nvpair_name(pair); char *hashp = strchr(name, '#'); if (hashp == NULL) { error = SET_ERROR(EINVAL); break; } *hashp = '\0'; error = zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_BOOKMARK, cr); *hashp = '#'; if (error != 0) break; } return (error); } /* ARGSUSED */ static int zfs_secpolicy_remap(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_REMAP, cr)); } /* ARGSUSED */ static int zfs_secpolicy_destroy_bookmarks(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { nvpair_t *pair, *nextpair; int error = 0; for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nextpair) { char *name = nvpair_name(pair); char *hashp = strchr(name, '#'); nextpair = nvlist_next_nvpair(innvl, pair); if (hashp == NULL) { error = SET_ERROR(EINVAL); break; } *hashp = '\0'; error = zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr); *hashp = '#'; if (error == ENOENT) { /* * Ignore any filesystems that don't exist (we consider * their bookmarks "already destroyed"). Remove * the name from the nvl here in case the filesystem * is created between now and when we try to destroy * the bookmark (in which case we don't want to * destroy it since we haven't checked for permission). */ fnvlist_remove_nvpair(innvl, pair); error = 0; } if (error != 0) break; } return (error); } /* ARGSUSED */ static int zfs_secpolicy_log_history(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { /* * Even root must have a proper TSD so that we know what pool * to log to. */ if (tsd_get(zfs_allow_log_key) == NULL) return (SET_ERROR(EPERM)); return (0); } static int zfs_secpolicy_create_clone(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { char parentname[ZFS_MAX_DATASET_NAME_LEN]; int error; char *origin; if ((error = zfs_get_parent(zc->zc_name, parentname, sizeof (parentname))) != 0) return (error); if (nvlist_lookup_string(innvl, "origin", &origin) == 0 && (error = zfs_secpolicy_write_perms(origin, ZFS_DELEG_PERM_CLONE, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_CREATE, cr)) != 0) return (error); return (zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_MOUNT, cr)); } /* * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires * SYS_CONFIG privilege, which is not available in a local zone. */ /* ARGSUSED */ static int zfs_secpolicy_config(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { if (secpolicy_sys_config(cr, B_FALSE) != 0) return (SET_ERROR(EPERM)); return (0); } /* * Policy for object to name lookups. */ /* ARGSUSED */ static int zfs_secpolicy_diff(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int error; if ((error = secpolicy_sys_config(cr, B_FALSE)) == 0) return (0); error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr); return (error); } /* * Policy for fault injection. Requires all privileges. 
*/ /* ARGSUSED */ static int zfs_secpolicy_inject(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (secpolicy_zinject(cr)); } /* ARGSUSED */ static int zfs_secpolicy_inherit_prop(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { zfs_prop_t prop = zfs_name_to_prop(zc->zc_value); if (prop == ZPROP_INVAL) { if (!zfs_prop_user(zc->zc_value)) return (SET_ERROR(EINVAL)); return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_USERPROP, cr)); } else { return (zfs_secpolicy_setprop(zc->zc_name, prop, NULL, cr)); } } static int zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int err = zfs_secpolicy_read(zc, innvl, cr); if (err) return (err); if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (SET_ERROR(EINVAL)); if (zc->zc_value[0] == 0) { /* * They are asking about a posix uid/gid. If it's * themself, allow it. */ if (zc->zc_objset_type == ZFS_PROP_USERUSED || zc->zc_objset_type == ZFS_PROP_USERQUOTA || zc->zc_objset_type == ZFS_PROP_USEROBJUSED || zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) { if (zc->zc_guid == crgetuid(cr)) return (0); } else if (zc->zc_objset_type == ZFS_PROP_GROUPUSED || zc->zc_objset_type == ZFS_PROP_GROUPQUOTA || zc->zc_objset_type == ZFS_PROP_GROUPOBJUSED || zc->zc_objset_type == ZFS_PROP_GROUPOBJQUOTA) { if (groupmember(zc->zc_guid, cr)) return (0); } /* else is for project quota/used */ } return (zfs_secpolicy_write_perms(zc->zc_name, userquota_perms[zc->zc_objset_type], cr)); } static int zfs_secpolicy_userspace_many(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { int err = zfs_secpolicy_read(zc, innvl, cr); if (err) return (err); if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (SET_ERROR(EINVAL)); return (zfs_secpolicy_write_perms(zc->zc_name, userquota_perms[zc->zc_objset_type], cr)); } /* ARGSUSED */ static int zfs_secpolicy_userspace_upgrade(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_setprop(zc->zc_name, ZFS_PROP_VERSION, NULL, cr)); } /* ARGSUSED */ static int zfs_secpolicy_hold(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { nvpair_t *pair; nvlist_t *holds; int error; holds = fnvlist_lookup_nvlist(innvl, "holds"); for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL; pair = nvlist_next_nvpair(holds, pair)) { char fsname[ZFS_MAX_DATASET_NAME_LEN]; error = dmu_fsname(nvpair_name(pair), fsname); if (error != 0) return (error); error = zfs_secpolicy_write_perms(fsname, ZFS_DELEG_PERM_HOLD, cr); if (error != 0) return (error); } return (0); } /* ARGSUSED */ static int zfs_secpolicy_release(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { nvpair_t *pair; int error; for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { char fsname[ZFS_MAX_DATASET_NAME_LEN]; error = dmu_fsname(nvpair_name(pair), fsname); if (error != 0) return (error); error = zfs_secpolicy_write_perms(fsname, ZFS_DELEG_PERM_RELEASE, cr); if (error != 0) return (error); } return (0); } /* * Policy for allowing temporary snapshots to be taken or released */ static int zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { /* * A temporary snapshot is the same as a snapshot, * hold, destroy and release all rolled into one. * Delegated diff alone is sufficient that we allow this. 
*/ int error; if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr)) == 0) return (0); error = zfs_secpolicy_snapshot_perms(zc->zc_name, cr); if (innvl != NULL) { if (error == 0) error = zfs_secpolicy_hold(zc, innvl, cr); if (error == 0) error = zfs_secpolicy_release(zc, innvl, cr); if (error == 0) error = zfs_secpolicy_destroy(zc, innvl, cr); } return (error); } static int zfs_secpolicy_load_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_LOAD_KEY, cr)); } static int zfs_secpolicy_change_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_CHANGE_KEY, cr)); } /* * Returns the nvlist as specified by the user in the zfs_cmd_t. */ static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp) { char *packed; int error; nvlist_t *list = NULL; /* * Read in and unpack the user-supplied nvlist. */ if (size == 0) return (SET_ERROR(EINVAL)); packed = vmem_alloc(size, KM_SLEEP); if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size, iflag)) != 0) { vmem_free(packed, size); return (SET_ERROR(EFAULT)); } if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) { vmem_free(packed, size); return (error); } vmem_free(packed, size); *nvp = list; return (0); } /* * Reduce the size of this nvlist until it can be serialized in 'max' bytes. * Entries will be removed from the end of the nvlist, and one int32 entry * named "N_MORE_ERRORS" will be added indicating how many entries were * removed. */ static int nvlist_smush(nvlist_t *errors, size_t max) { size_t size; size = fnvlist_size(errors); if (size > max) { nvpair_t *more_errors; int n = 0; if (max < 1024) return (SET_ERROR(ENOMEM)); fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, 0); more_errors = nvlist_prev_nvpair(errors, NULL); do { nvpair_t *pair = nvlist_prev_nvpair(errors, more_errors); fnvlist_remove_nvpair(errors, pair); n++; size = fnvlist_size(errors); } while (size > max); fnvlist_remove_nvpair(errors, more_errors); fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, n); ASSERT3U(fnvlist_size(errors), <=, max); } return (0); } static int put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl) { char *packed = NULL; int error = 0; size_t size; size = fnvlist_size(nvl); if (size > zc->zc_nvlist_dst_size) { error = SET_ERROR(ENOMEM); } else { packed = fnvlist_pack(nvl, &size); if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst, size, zc->zc_iflags) != 0) error = SET_ERROR(EFAULT); fnvlist_pack_free(packed, size); } zc->zc_nvlist_dst_size = size; zc->zc_nvlist_dst_filled = B_TRUE; return (error); } int getzfsvfs_impl(objset_t *os, zfsvfs_t **zfvp) { int error = 0; if (dmu_objset_type(os) != DMU_OST_ZFS) { return (SET_ERROR(EINVAL)); } mutex_enter(&os->os_user_ptr_lock); *zfvp = dmu_objset_get_user(os); /* bump s_active only when non-zero to prevent umount race */ if (*zfvp == NULL || (*zfvp)->z_sb == NULL || !atomic_inc_not_zero(&((*zfvp)->z_sb->s_active))) { error = SET_ERROR(ESRCH); } mutex_exit(&os->os_user_ptr_lock); return (error); } int getzfsvfs(const char *dsname, zfsvfs_t **zfvp) { objset_t *os; int error; error = dmu_objset_hold(dsname, FTAG, &os); if (error != 0) return (error); error = getzfsvfs_impl(os, zfvp); dmu_objset_rele(os, FTAG); return (error); } /* * Find a zfsvfs_t for a mounted filesystem, or create our own, in which * case its z_sb will be NULL, and it will be opened as the owner. 
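 *
 * A typical caller brackets its work with the hold (a sketch drawn
 * from the ZFS_PROP_VERSION case further below):
 *
 *	zfsvfs_t *zfsvfs;
 *	if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) == 0) {
 *		err = zfs_set_version(zfsvfs, intval);
 *		zfsvfs_rele(zfsvfs, FTAG);
 *	}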
* If 'writer' is set, the z_teardown_lock will be held for RW_WRITER, * which prevents all inode ops from running. */ static int zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer) { int error = 0; if (getzfsvfs(name, zfvp) != 0) error = zfsvfs_create(name, B_FALSE, zfvp); if (error == 0) { rrm_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER : RW_READER, tag); if ((*zfvp)->z_unmounted) { /* * XXX we could probably try again, since the unmounting * thread should be just about to disassociate the * objset from the zfsvfs. */ rrm_exit(&(*zfvp)->z_teardown_lock, tag); return (SET_ERROR(EBUSY)); } } return (error); } static void zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag) { rrm_exit(&zfsvfs->z_teardown_lock, tag); if (zfsvfs->z_sb) { deactivate_super(zfsvfs->z_sb); } else { dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs); zfsvfs_free(zfsvfs); } } static int zfs_ioc_pool_create(zfs_cmd_t *zc) { int error; nvlist_t *config, *props = NULL; nvlist_t *rootprops = NULL; nvlist_t *zplprops = NULL; dsl_crypto_params_t *dcp = NULL; char *spa_name = zc->zc_name; if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config))) return (error); if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { nvlist_free(config); return (error); } if (props) { nvlist_t *nvl = NULL; nvlist_t *hidden_args = NULL; uint64_t version = SPA_VERSION; char *tname; (void) nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), &version); if (!SPA_VERSION_IS_SUPPORTED(version)) { error = SET_ERROR(EINVAL); goto pool_props_bad; } (void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl); if (nvl) { error = nvlist_dup(nvl, &rootprops, KM_SLEEP); if (error != 0) { nvlist_free(config); nvlist_free(props); return (error); } (void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS); } (void) nvlist_lookup_nvlist(props, ZPOOL_HIDDEN_ARGS, &hidden_args); error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, rootprops, hidden_args, &dcp); if (error != 0) { nvlist_free(config); nvlist_free(props); return (error); } (void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS); VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); error = zfs_fill_zplprops_root(version, rootprops, zplprops, NULL); if (error != 0) goto pool_props_bad; if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_TNAME), &tname) == 0) spa_name = tname; } error = spa_create(zc->zc_name, config, props, zplprops, dcp); /* * Set the remaining root properties */ if (!error && (error = zfs_set_prop_nvlist(spa_name, ZPROP_SRC_LOCAL, rootprops, NULL)) != 0) (void) spa_destroy(spa_name); pool_props_bad: nvlist_free(rootprops); nvlist_free(zplprops); nvlist_free(config); nvlist_free(props); dsl_crypto_params_free(dcp, !!error); return (error); } static int zfs_ioc_pool_destroy(zfs_cmd_t *zc) { int error; zfs_log_history(zc); error = spa_destroy(zc->zc_name); return (error); } static int zfs_ioc_pool_import(zfs_cmd_t *zc) { nvlist_t *config, *props = NULL; uint64_t guid; int error; if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) != 0) return (error); if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { nvlist_free(config); return (error); } if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 || guid != zc->zc_guid) error = SET_ERROR(EINVAL); else error = spa_import(zc->zc_name, config, props, 
zc->zc_cookie); if (zc->zc_nvlist_dst != 0) { int err; if ((err = put_nvlist(zc, config)) != 0) error = err; } nvlist_free(config); nvlist_free(props); return (error); } static int zfs_ioc_pool_export(zfs_cmd_t *zc) { int error; boolean_t force = (boolean_t)zc->zc_cookie; boolean_t hardforce = (boolean_t)zc->zc_guid; zfs_log_history(zc); error = spa_export(zc->zc_name, NULL, force, hardforce); return (error); } static int zfs_ioc_pool_configs(zfs_cmd_t *zc) { nvlist_t *configs; int error; if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL) return (SET_ERROR(EEXIST)); error = put_nvlist(zc, configs); nvlist_free(configs); return (error); } /* * inputs: * zc_name name of the pool * * outputs: * zc_cookie real errno * zc_nvlist_dst config nvlist * zc_nvlist_dst_size size of config nvlist */ static int zfs_ioc_pool_stats(zfs_cmd_t *zc) { nvlist_t *config; int error; int ret = 0; error = spa_get_stats(zc->zc_name, &config, zc->zc_value, sizeof (zc->zc_value)); if (config != NULL) { ret = put_nvlist(zc, config); nvlist_free(config); /* * The config may be present even if 'error' is non-zero. * In this case we return success, and preserve the real errno * in 'zc_cookie'. */ zc->zc_cookie = error; } else { ret = error; } return (ret); } /* * Try to import the given pool, returning pool stats as appropriate so that * user land knows which devices are available and overall pool health. */ static int zfs_ioc_pool_tryimport(zfs_cmd_t *zc) { nvlist_t *tryconfig, *config = NULL; int error; if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &tryconfig)) != 0) return (error); config = spa_tryimport(tryconfig); nvlist_free(tryconfig); if (config == NULL) return (SET_ERROR(EINVAL)); error = put_nvlist(zc, config); nvlist_free(config); return (error); } /* * inputs: * zc_name name of the pool * zc_cookie scan func (pool_scan_func_t) * zc_flags scrub pause/resume flag (pool_scrub_cmd_t) */ static int zfs_ioc_pool_scan(zfs_cmd_t *zc) { spa_t *spa; int error; if (zc->zc_flags >= POOL_SCRUB_FLAGS_END) return (SET_ERROR(EINVAL)); if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (zc->zc_flags == POOL_SCRUB_PAUSE) error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE); else if (zc->zc_cookie == POOL_SCAN_NONE) error = spa_scan_stop(spa); else error = spa_scan(spa, zc->zc_cookie); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_freeze(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error == 0) { spa_freeze(spa); spa_close(spa, FTAG); } return (error); } static int zfs_ioc_pool_upgrade(zfs_cmd_t *zc) { spa_t *spa; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (zc->zc_cookie < spa_version(spa) || !SPA_VERSION_IS_SUPPORTED(zc->zc_cookie)) { spa_close(spa, FTAG); return (SET_ERROR(EINVAL)); } spa_upgrade(spa, zc->zc_cookie); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_get_history(zfs_cmd_t *zc) { spa_t *spa; char *hist_buf; uint64_t size; int error; if ((size = zc->zc_history_len) == 0) return (SET_ERROR(EINVAL)); if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } hist_buf = vmem_alloc(size, KM_SLEEP); if ((error = spa_history_get(spa, &zc->zc_history_offset, &zc->zc_history_len, hist_buf)) == 0) { error = ddi_copyout(hist_buf, (void *)(uintptr_t)zc->zc_history, zc->zc_history_len, zc->zc_iflags); } spa_close(spa, FTAG); 
vmem_free(hist_buf, size); return (error); } static int zfs_ioc_pool_reguid(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error == 0) { error = spa_change_guid(spa); spa_close(spa, FTAG); } return (error); } static int zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc) { return (dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value)); } /* * inputs: * zc_name name of filesystem * zc_obj object to find * * outputs: * zc_value name of object */ static int zfs_ioc_obj_to_path(zfs_cmd_t *zc) { objset_t *os; int error; /* XXX reading from objset not owned */ if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os)) != 0) return (error); if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele_flags(os, B_TRUE, FTAG); return (SET_ERROR(EINVAL)); } error = zfs_obj_to_path(os, zc->zc_obj, zc->zc_value, sizeof (zc->zc_value)); dmu_objset_rele_flags(os, B_TRUE, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_obj object to find * * outputs: * zc_stat stats on object * zc_value path to object */ static int zfs_ioc_obj_to_stats(zfs_cmd_t *zc) { objset_t *os; int error; /* XXX reading from objset not owned */ if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os)) != 0) return (error); if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele_flags(os, B_TRUE, FTAG); return (SET_ERROR(EINVAL)); } error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, zc->zc_value, sizeof (zc->zc_value)); dmu_objset_rele_flags(os, B_TRUE, FTAG); return (error); } static int zfs_ioc_vdev_add(zfs_cmd_t *zc) { spa_t *spa; int error; nvlist_t *config; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config); if (error == 0) { error = spa_vdev_add(spa, config); nvlist_free(config); } spa_close(spa, FTAG); return (error); } /* * inputs: * zc_name name of the pool * zc_guid guid of vdev to remove * zc_cookie cancel removal */ static int zfs_ioc_vdev_remove(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); if (zc->zc_cookie != 0) { error = spa_vdev_remove_cancel(spa); } else { error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE); } spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_set_state(zfs_cmd_t *zc) { spa_t *spa; int error; vdev_state_t newstate = VDEV_STATE_UNKNOWN; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); switch (zc->zc_cookie) { case VDEV_STATE_ONLINE: error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate); break; case VDEV_STATE_OFFLINE: error = vdev_offline(spa, zc->zc_guid, zc->zc_obj); break; case VDEV_STATE_FAULTED: if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED && zc->zc_obj != VDEV_AUX_EXTERNAL && zc->zc_obj != VDEV_AUX_EXTERNAL_PERSIST) zc->zc_obj = VDEV_AUX_ERR_EXCEEDED; error = vdev_fault(spa, zc->zc_guid, zc->zc_obj); break; case VDEV_STATE_DEGRADED: if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED && zc->zc_obj != VDEV_AUX_EXTERNAL) zc->zc_obj = VDEV_AUX_ERR_EXCEEDED; error = vdev_degrade(spa, zc->zc_guid, zc->zc_obj); break; default: error = SET_ERROR(EINVAL); } zc->zc_cookie = newstate; spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_attach(zfs_cmd_t *zc) { spa_t *spa; int replacing = zc->zc_cookie; nvlist_t *config; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) == 0) { error = 
spa_vdev_attach(spa, zc->zc_guid, config, replacing); nvlist_free(config); } spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_detach(zfs_cmd_t *zc) { spa_t *spa; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); error = spa_vdev_detach(spa, zc->zc_guid, 0, B_FALSE); spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_split(zfs_cmd_t *zc) { spa_t *spa; nvlist_t *config, *props = NULL; int error; boolean_t exp = !!(zc->zc_cookie & ZPOOL_EXPORT_AFTER_SPLIT); if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config))) { spa_close(spa, FTAG); return (error); } if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { spa_close(spa, FTAG); nvlist_free(config); return (error); } error = spa_vdev_split_mirror(spa, zc->zc_string, config, props, exp); spa_close(spa, FTAG); nvlist_free(config); nvlist_free(props); return (error); } static int zfs_ioc_vdev_setpath(zfs_cmd_t *zc) { spa_t *spa; char *path = zc->zc_value; uint64_t guid = zc->zc_guid; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = spa_vdev_setpath(spa, guid, path); spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_setfru(zfs_cmd_t *zc) { spa_t *spa; char *fru = zc->zc_value; uint64_t guid = zc->zc_guid; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = spa_vdev_setfru(spa, guid, fru); spa_close(spa, FTAG); return (error); } static int zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os) { int error = 0; nvlist_t *nv; dmu_objset_fast_stat(os, &zc->zc_objset_stats); if (zc->zc_nvlist_dst != 0 && (error = dsl_prop_get_all(os, &nv)) == 0) { dmu_objset_stats(os, nv); /* * NB: zvol_get_stats() will read the objset contents, * which we aren't supposed to do with a * DS_MODE_USER hold, because it could be * inconsistent. So this is a bit of a workaround... * XXX reading with out owning */ if (!zc->zc_objset_stats.dds_inconsistent && dmu_objset_type(os) == DMU_OST_ZVOL) { error = zvol_get_stats(os, nv); if (error == EIO) { nvlist_free(nv); return (error); } VERIFY0(error); } if (error == 0) error = put_nvlist(zc, nv); nvlist_free(nv); } return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_objset_stats(zfs_cmd_t *zc) { objset_t *os; int error; error = dmu_objset_hold(zc->zc_name, FTAG, &os); if (error == 0) { error = zfs_ioc_objset_stats_impl(zc, os); dmu_objset_rele(os, FTAG); } return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_nvlist_dst received property nvlist * zc_nvlist_dst_size size of received property nvlist * * Gets received properties (distinct from local properties on or after * SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from * local property values. */ static int zfs_ioc_objset_recvd_props(zfs_cmd_t *zc) { int error = 0; nvlist_t *nv; /* * Without this check, we would return local property values if the * caller has not already received properties on or after * SPA_VERSION_RECVD_PROPS. 
*/ if (!dsl_prop_get_hasrecvd(zc->zc_name)) return (SET_ERROR(ENOTSUP)); if (zc->zc_nvlist_dst != 0 && (error = dsl_prop_get_received(zc->zc_name, &nv)) == 0) { error = put_nvlist(zc, nv); nvlist_free(nv); } return (error); } static int nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop) { uint64_t value; int error; /* * zfs_get_zplprop() will either find a value or give us * the default value (if there is one). */ if ((error = zfs_get_zplprop(os, prop, &value)) != 0) return (error); VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0); return (0); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for zpl property nvlist * * outputs: * zc_nvlist_dst zpl property nvlist * zc_nvlist_dst_size size of zpl property nvlist */ static int zfs_ioc_objset_zplprops(zfs_cmd_t *zc) { objset_t *os; int err; /* XXX reading without owning */ if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os))) return (err); dmu_objset_fast_stat(os, &zc->zc_objset_stats); /* * NB: nvl_add_zplprop() will read the objset contents, * which we aren't supposed to do with a DS_MODE_USER * hold, because it could be inconsistent. */ if (zc->zc_nvlist_dst != 0 && !zc->zc_objset_stats.dds_inconsistent && dmu_objset_type(os) == DMU_OST_ZFS) { nvlist_t *nv; VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0) err = put_nvlist(zc, nv); nvlist_free(nv); } else { err = SET_ERROR(ENOENT); } dmu_objset_rele(os, FTAG); return (err); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_name name of next filesystem * zc_cookie zap cursor * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_dataset_list_next(zfs_cmd_t *zc) { objset_t *os; int error; char *p; size_t orig_len = strlen(zc->zc_name); top: if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) { if (error == ENOENT) error = SET_ERROR(ESRCH); return (error); } p = strrchr(zc->zc_name, '/'); if (p == NULL || p[1] != '\0') (void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name)); p = zc->zc_name + strlen(zc->zc_name); do { error = dmu_dir_list_next(os, sizeof (zc->zc_name) - (p - zc->zc_name), p, NULL, &zc->zc_cookie); if (error == ENOENT) error = SET_ERROR(ESRCH); } while (error == 0 && zfs_dataset_name_hidden(zc->zc_name)); dmu_objset_rele(os, FTAG); /* * If it's an internal dataset (ie. with a '$' in its name), * don't try to get stats for it, otherwise we'll return ENOENT. */ if (error == 0 && strchr(zc->zc_name, '$') == NULL) { error = zfs_ioc_objset_stats(zc); /* fill in the stats */ if (error == ENOENT) { /* We lost a race with destroy, get the next one. */ zc->zc_name[orig_len] = '\0'; goto top; } } return (error); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_name name of next snapshot * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_snapshot_list_next(zfs_cmd_t *zc) { objset_t *os; int error; error = dmu_objset_hold(zc->zc_name, FTAG, &os); if (error != 0) { return (error == ENOENT ? 
ESRCH : error); } /* * A dataset name of maximum length cannot have any snapshots, * so exit immediately. */ if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >= ZFS_MAX_DATASET_NAME_LEN) { dmu_objset_rele(os, FTAG); return (SET_ERROR(ESRCH)); } error = dmu_snapshot_list_next(os, sizeof (zc->zc_name) - strlen(zc->zc_name), zc->zc_name + strlen(zc->zc_name), &zc->zc_obj, &zc->zc_cookie, NULL); if (error == 0 && !zc->zc_simple) { dsl_dataset_t *ds; dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool; error = dsl_dataset_hold_obj(dp, zc->zc_obj, FTAG, &ds); if (error == 0) { objset_t *ossnap; error = dmu_objset_from_ds(ds, &ossnap); if (error == 0) error = zfs_ioc_objset_stats_impl(zc, ossnap); dsl_dataset_rele(ds, FTAG); } } else if (error == ENOENT) { error = SET_ERROR(ESRCH); } dmu_objset_rele(os, FTAG); /* if we failed, undo the @ that we tacked on to zc_name */ if (error != 0) *strchr(zc->zc_name, '@') = '\0'; return (error); } static int zfs_prop_set_userquota(const char *dsname, nvpair_t *pair) { const char *propname = nvpair_name(pair); uint64_t *valary; unsigned int vallen; const char *domain; char *dash; zfs_userquota_prop_t type; uint64_t rid; uint64_t quota; zfsvfs_t *zfsvfs; int err; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) != 0) return (SET_ERROR(EINVAL)); } /* * A correctly constructed propname is encoded as * userquota@<rid>-<domain>. */ if ((dash = strchr(propname, '-')) == NULL || nvpair_value_uint64_array(pair, &valary, &vallen) != 0 || vallen != 3) return (SET_ERROR(EINVAL)); domain = dash + 1; type = valary[0]; rid = valary[1]; quota = valary[2]; err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE); if (err == 0) { err = zfs_set_userquota(zfsvfs, type, domain, rid, quota); zfsvfs_rele(zfsvfs, FTAG); } return (err); } /* * If the named property is one that has a special function to set its value, * return 0 on success and a positive error code on failure; otherwise if it is * not one of the special properties handled by this function, return -1. * * XXX: It would be better for callers of the property interface if we handled * these special cases in dsl_prop.c (in the dsl layer). */ static int zfs_prop_set_special(const char *dsname, zprop_source_t source, nvpair_t *pair) { const char *propname = nvpair_name(pair); zfs_prop_t prop = zfs_name_to_prop(propname); uint64_t intval = 0; char *strval = NULL; int err = -1; if (prop == ZPROP_INVAL) { if (zfs_prop_userquota(propname)) return (zfs_prop_set_userquota(dsname, pair)); return (-1); } if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) == 0); } /* all special properties are numeric except for keylocation */ if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) { strval = fnvpair_value_string(pair); } else { intval = fnvpair_value_uint64(pair); } switch (prop) { case ZFS_PROP_QUOTA: err = dsl_dir_set_quota(dsname, source, intval); break; case ZFS_PROP_REFQUOTA: err = dsl_dataset_set_refquota(dsname, source, intval); break; case ZFS_PROP_FILESYSTEM_LIMIT: case ZFS_PROP_SNAPSHOT_LIMIT: if (intval == UINT64_MAX) { /* clearing the limit, just do it */ err = 0; } else { err = dsl_dir_activate_fs_ss_limit(dsname); } /* * Set err to -1 to force the zfs_set_prop_nvlist code down the * default path to set the value in the nvlist.
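 *
 * (-1 is the "not special, nothing done" return of
 * zfs_prop_set_special(); zfs_set_prop_nvlist() reacts to it by
 * adding the pair to the generic nvlist that is later applied via
 * dsl_props_set().)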
*/ if (err == 0) err = -1; break; case ZFS_PROP_KEYLOCATION: err = dsl_crypto_can_set_keylocation(dsname, strval); /* * Set err to -1 to force the zfs_set_prop_nvlist code down the * default path to set the value in the nvlist. */ if (err == 0) err = -1; break; case ZFS_PROP_RESERVATION: err = dsl_dir_set_reservation(dsname, source, intval); break; case ZFS_PROP_REFRESERVATION: err = dsl_dataset_set_refreservation(dsname, source, intval); break; case ZFS_PROP_VOLSIZE: err = zvol_set_volsize(dsname, intval); break; case ZFS_PROP_SNAPDEV: err = zvol_set_snapdev(dsname, source, intval); break; case ZFS_PROP_VOLMODE: err = zvol_set_volmode(dsname, source, intval); break; case ZFS_PROP_VERSION: { zfsvfs_t *zfsvfs; if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0) break; err = zfs_set_version(zfsvfs, intval); zfsvfs_rele(zfsvfs, FTAG); if (err == 0 && intval >= ZPL_VERSION_USERSPACE) { zfs_cmd_t *zc; zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP); (void) strcpy(zc->zc_name, dsname); (void) zfs_ioc_userspace_upgrade(zc); (void) zfs_ioc_id_quota_upgrade(zc); kmem_free(zc, sizeof (zfs_cmd_t)); } break; } default: err = -1; } return (err); } /* * This function is best effort. If it fails to set any of the given properties, * it continues to set as many as it can and returns the last error * encountered. If the caller provides a non-NULL errlist, it will be filled in * with the list of names of all the properties that failed along with the * corresponding error numbers. * * If every property is set successfully, zero is returned and errlist is not * modified. */ int zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl, nvlist_t *errlist) { nvpair_t *pair; nvpair_t *propval; int rv = 0; uint64_t intval; char *strval; nvlist_t *genericnvl = fnvlist_alloc(); nvlist_t *retrynvl = fnvlist_alloc(); retry: pair = NULL; while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) { const char *propname = nvpair_name(pair); zfs_prop_t prop = zfs_name_to_prop(propname); int err = 0; /* decode the property value */ propval = pair; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; attrs = fnvpair_value_nvlist(pair); if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &propval) != 0) err = SET_ERROR(EINVAL); } /* Validate value type */ if (err == 0 && source == ZPROP_SRC_INHERITED) { /* inherited properties are expected to be booleans */ if (nvpair_type(propval) != DATA_TYPE_BOOLEAN) err = SET_ERROR(EINVAL); } else if (err == 0 && prop == ZPROP_INVAL) { if (zfs_prop_user(propname)) { if (nvpair_type(propval) != DATA_TYPE_STRING) err = SET_ERROR(EINVAL); } else if (zfs_prop_userquota(propname)) { if (nvpair_type(propval) != DATA_TYPE_UINT64_ARRAY) err = SET_ERROR(EINVAL); } else { err = SET_ERROR(EINVAL); } } else if (err == 0) { if (nvpair_type(propval) == DATA_TYPE_STRING) { if (zfs_prop_get_type(prop) != PROP_TYPE_STRING) err = SET_ERROR(EINVAL); } else if (nvpair_type(propval) == DATA_TYPE_UINT64) { const char *unused; intval = fnvpair_value_uint64(propval); switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: break; case PROP_TYPE_STRING: err = SET_ERROR(EINVAL); break; case PROP_TYPE_INDEX: if (zfs_prop_index_to_string(prop, intval, &unused) != 0) err = SET_ERROR(EINVAL); break; default: cmn_err(CE_PANIC, "unknown property type"); } } else { err = SET_ERROR(EINVAL); } } /* Validate permissions */ if (err == 0) err = zfs_check_settable(dsname, pair, CRED()); if (err == 0) { if (source == ZPROP_SRC_INHERITED) err = -1; /* does not need special handling */ else err = 
zfs_prop_set_special(dsname, source, pair); if (err == -1) { /* * For better performance we build up a list of * properties to set in a single transaction. */ err = nvlist_add_nvpair(genericnvl, pair); } else if (err != 0 && nvl != retrynvl) { /* * This may be a spurious error caused by * receiving quota and reservation out of order. * Try again in a second pass. */ err = nvlist_add_nvpair(retrynvl, pair); } } if (err != 0) { if (errlist != NULL) fnvlist_add_int32(errlist, propname, err); rv = err; } } if (nvl != retrynvl && !nvlist_empty(retrynvl)) { nvl = retrynvl; goto retry; } if (!nvlist_empty(genericnvl) && dsl_props_set(dsname, source, genericnvl) != 0) { /* * If this fails, we still want to set as many properties as we * can, so try setting them individually. */ pair = NULL; while ((pair = nvlist_next_nvpair(genericnvl, pair)) != NULL) { const char *propname = nvpair_name(pair); int err = 0; propval = pair; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; attrs = fnvpair_value_nvlist(pair); propval = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE); } if (nvpair_type(propval) == DATA_TYPE_STRING) { strval = fnvpair_value_string(propval); err = dsl_prop_set_string(dsname, propname, source, strval); } else if (nvpair_type(propval) == DATA_TYPE_BOOLEAN) { err = dsl_prop_inherit(dsname, propname, source); } else { intval = fnvpair_value_uint64(propval); err = dsl_prop_set_int(dsname, propname, source, intval); } if (err != 0) { if (errlist != NULL) { fnvlist_add_int32(errlist, propname, err); } rv = err; } } } nvlist_free(genericnvl); nvlist_free(retrynvl); return (rv); } /* * Check that all the properties are valid user properties. */ static int zfs_check_userprops(const char *fsname, nvlist_t *nvl) { nvpair_t *pair = NULL; int error = 0; while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) { const char *propname = nvpair_name(pair); if (!zfs_prop_user(propname) || nvpair_type(pair) != DATA_TYPE_STRING) return (SET_ERROR(EINVAL)); if ((error = zfs_secpolicy_write_perms(fsname, ZFS_DELEG_PERM_USERPROP, CRED()))) return (error); if (strlen(propname) >= ZAP_MAXNAMELEN) return (SET_ERROR(ENAMETOOLONG)); if (strlen(fnvpair_value_string(pair)) >= ZAP_MAXVALUELEN) return (SET_ERROR(E2BIG)); } return (0); } static void props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops) { nvpair_t *pair; VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); pair = NULL; while ((pair = nvlist_next_nvpair(props, pair)) != NULL) { if (nvlist_exists(skipped, nvpair_name(pair))) continue; VERIFY(nvlist_add_nvpair(*newprops, pair) == 0); } } static int clear_received_props(const char *dsname, nvlist_t *props, nvlist_t *skipped) { int err = 0; nvlist_t *cleared_props = NULL; props_skip(props, skipped, &cleared_props); if (!nvlist_empty(cleared_props)) { /* * Acts on local properties until the dataset has received * properties at least once on or after SPA_VERSION_RECVD_PROPS. */ zprop_source_t flags = (ZPROP_SRC_NONE | (dsl_prop_get_hasrecvd(dsname) ? ZPROP_SRC_RECEIVED : 0)); err = zfs_set_prop_nvlist(dsname, flags, cleared_props, NULL); } nvlist_free(cleared_props); return (err); } /* * inputs: * zc_name name of filesystem * zc_value name of property to set * zc_nvlist_src{_size} nvlist of properties to apply * zc_cookie received properties flag * * outputs: * zc_nvlist_dst{_size} error for each unapplied received property */ static int zfs_ioc_set_prop(zfs_cmd_t *zc) { nvlist_t *nvl; boolean_t received = zc->zc_cookie; zprop_source_t source = (received ? 
ZPROP_SRC_RECEIVED : ZPROP_SRC_LOCAL); nvlist_t *errors; int error; if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvl)) != 0) return (error); if (received) { nvlist_t *origprops; if (dsl_prop_get_received(zc->zc_name, &origprops) == 0) { (void) clear_received_props(zc->zc_name, origprops, nvl); nvlist_free(origprops); } error = dsl_prop_set_hasrecvd(zc->zc_name); } errors = fnvlist_alloc(); if (error == 0) error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, errors); if (zc->zc_nvlist_dst != 0 && errors != NULL) { (void) put_nvlist(zc, errors); } nvlist_free(errors); nvlist_free(nvl); return (error); } /* * inputs: * zc_name name of filesystem * zc_value name of property to inherit * zc_cookie revert to received value if TRUE * * outputs: none */ static int zfs_ioc_inherit_prop(zfs_cmd_t *zc) { const char *propname = zc->zc_value; zfs_prop_t prop = zfs_name_to_prop(propname); boolean_t received = zc->zc_cookie; zprop_source_t source = (received ? ZPROP_SRC_NONE /* revert to received value, if any */ : ZPROP_SRC_INHERITED); /* explicitly inherit */ nvlist_t *dummy; nvpair_t *pair; zprop_type_t type; int err; if (!received) { /* * Only check this in the non-received case. We want to allow * 'inherit -S' to revert non-inheritable properties like quota * and reservation to the received or default values even though * they are not considered inheritable. */ if (prop != ZPROP_INVAL && !zfs_prop_inheritable(prop)) return (SET_ERROR(EINVAL)); } if (prop == ZPROP_INVAL) { if (!zfs_prop_user(propname)) return (SET_ERROR(EINVAL)); type = PROP_TYPE_STRING; } else if (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION) { return (SET_ERROR(EINVAL)); } else { type = zfs_prop_get_type(prop); } /* * zfs_prop_set_special() expects properties in the form of an * nvpair with type info. */ dummy = fnvlist_alloc(); switch (type) { case PROP_TYPE_STRING: VERIFY(0 == nvlist_add_string(dummy, propname, "")); break; case PROP_TYPE_NUMBER: case PROP_TYPE_INDEX: VERIFY(0 == nvlist_add_uint64(dummy, propname, 0)); break; default: err = SET_ERROR(EINVAL); goto errout; } pair = nvlist_next_nvpair(dummy, NULL); if (pair == NULL) { err = SET_ERROR(EINVAL); } else { err = zfs_prop_set_special(zc->zc_name, source, pair); if (err == -1) /* property is not "special", needs handling */ err = dsl_prop_inherit(zc->zc_name, zc->zc_value, source); } errout: nvlist_free(dummy); return (err); } static int zfs_ioc_pool_set_props(zfs_cmd_t *zc) { nvlist_t *props; spa_t *spa; int error; nvpair_t *pair; if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) return (error); /* * If the only property is the configfile, then just do a spa_lookup() * to handle the faulted case. 
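* * (This early path means a cachefile update can succeed on a pool that * cannot be opened normally: the value is applied via spa_configfile_set() * and persisted with spa_write_cachefile(), bypassing spa_prop_set().)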
*/ pair = nvlist_next_nvpair(props, NULL); if (pair != NULL && strcmp(nvpair_name(pair), zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 && nvlist_next_nvpair(props, pair) == NULL) { mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(zc->zc_name)) != NULL) { spa_configfile_set(spa, props, B_FALSE); spa_write_cachefile(spa, B_FALSE, B_TRUE); } mutex_exit(&spa_namespace_lock); if (spa != NULL) { nvlist_free(props); return (0); } } if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) { nvlist_free(props); return (error); } error = spa_prop_set(spa, props); nvlist_free(props); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_get_props(zfs_cmd_t *zc) { spa_t *spa; int error; nvlist_t *nvp = NULL; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) { /* * If the pool is faulted, there may be properties we can still * get (such as altroot and cachefile), so attempt to get them * anyway. */ mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(zc->zc_name)) != NULL) error = spa_prop_get(spa, &nvp); mutex_exit(&spa_namespace_lock); } else { error = spa_prop_get(spa, &nvp); spa_close(spa, FTAG); } if (error == 0 && zc->zc_nvlist_dst != 0) error = put_nvlist(zc, nvp); else error = SET_ERROR(EFAULT); nvlist_free(nvp); return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_src{_size} nvlist of delegated permissions * zc_perm_action allow/unallow flag * * outputs: none */ static int zfs_ioc_set_fsacl(zfs_cmd_t *zc) { int error; nvlist_t *fsaclnv = NULL; if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &fsaclnv)) != 0) return (error); /* * Verify nvlist is constructed correctly */ if ((error = zfs_deleg_verify_nvlist(fsaclnv)) != 0) { nvlist_free(fsaclnv); return (SET_ERROR(EINVAL)); } /* * If we don't have PRIV_SYS_MOUNT, then validate * that user is allowed to hand out each permission in * the nvlist(s) */ error = secpolicy_zfs(CRED()); if (error != 0) { if (zc->zc_perm_action == B_FALSE) { error = dsl_deleg_can_allow(zc->zc_name, fsaclnv, CRED()); } else { error = dsl_deleg_can_unallow(zc->zc_name, fsaclnv, CRED()); } } if (error == 0) error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action); nvlist_free(fsaclnv); return (error); } /* * inputs: * zc_name name of filesystem * * outputs: * zc_nvlist_src{_size} nvlist of delegated permissions */ static int zfs_ioc_get_fsacl(zfs_cmd_t *zc) { nvlist_t *nvp; int error; if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) { error = put_nvlist(zc, nvp); nvlist_free(nvp); } return (error); } /* ARGSUSED */ static void zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) { zfs_creat_t *zct = arg; zfs_create_fs(os, cr, zct->zct_zplprops, tx); } #define ZFS_PROP_UNDEFINED ((uint64_t)-1) /* * inputs: * os parent objset pointer (NULL if root fs) * fuids_ok fuids allowed in this version of the spa? * sa_ok SAs allowed in this version of the spa? * createprops list of properties requested by creator * * outputs: * zplprops values for the zplprops we attach to the master node object * is_ci true if requested file system will be purely case-insensitive * * Determine the settings for utf8only, normalization and * casesensitivity. Specific values may have been requested by the * creator and/or we can inherit values from the parent dataset. If * the file system is of too early a vintage, a creator can not * request settings for these properties, even if the requested * setting is the default value. 
We don't actually want to create dsl * properties for these, so remove them from the source nvlist after * processing. */ static int zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver, boolean_t fuids_ok, boolean_t sa_ok, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { uint64_t sense = ZFS_PROP_UNDEFINED; uint64_t norm = ZFS_PROP_UNDEFINED; uint64_t u8 = ZFS_PROP_UNDEFINED; int error; ASSERT(zplprops != NULL); + /* parent dataset must be a filesystem */ if (os != NULL && os->os_phys->os_type != DMU_OST_ZFS) - return (SET_ERROR(EINVAL)); + return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); /* * Pull out creator prop choices, if any. */ if (createprops) { (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_VERSION), &zplver); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE)); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_CASE), &sense); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_CASE)); } /* * If the zpl version requested is whacky or the file system * or pool version is too "young" to support normalization * and the creator tried to set a value for one of the props, * error out. */ if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) || (zplver >= ZPL_VERSION_FUID && !fuids_ok) || (zplver >= ZPL_VERSION_SA && !sa_ok) || (zplver < ZPL_VERSION_NORMALIZATION && (norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED || sense != ZFS_PROP_UNDEFINED))) return (SET_ERROR(ENOTSUP)); /* * Put the version in the zplprops */ VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0); if (norm == ZFS_PROP_UNDEFINED && (error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0) return (error); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0); /* * If we're normalizing, names must always be valid UTF-8 strings. */ if (norm) u8 = 1; if (u8 == ZFS_PROP_UNDEFINED && (error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0) return (error); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0); if (sense == ZFS_PROP_UNDEFINED && (error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0) return (error); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0); if (is_ci) *is_ci = (sense == ZFS_CASE_INSENSITIVE); return (0); } static int zfs_fill_zplprops(const char *dataset, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { boolean_t fuids_ok, sa_ok; uint64_t zplver = ZPL_VERSION; objset_t *os = NULL; char parentname[ZFS_MAX_DATASET_NAME_LEN]; - char *cp; spa_t *spa; uint64_t spa_vers; int error; - (void) strlcpy(parentname, dataset, sizeof (parentname)); - cp = strrchr(parentname, '/'); - ASSERT(cp != NULL); - cp[0] = '\0'; + zfs_get_parent(dataset, parentname, sizeof (parentname)); if ((error = spa_open(dataset, &spa, FTAG)) != 0) return (error); spa_vers = spa_version(spa); spa_close(spa, FTAG); zplver = zfs_zpl_version_map(spa_vers); fuids_ok = (zplver >= ZPL_VERSION_FUID); sa_ok = (zplver >= ZPL_VERSION_SA); /* * Open parent object set so we can inherit zplprop values.
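* * (With the change above, a parent that is not a ZPL dataset -- e.g. a * zvol -- now surfaces as ZFS_ERR_WRONG_PARENT instead of a generic * EINVAL, so callers can distinguish "bad parent" from other invalid * arguments.)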
*/ if ((error = dmu_objset_hold(parentname, FTAG, &os)) != 0) return (error); error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, sa_ok, createprops, zplprops, is_ci); dmu_objset_rele(os, FTAG); return (error); } static int zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { boolean_t fuids_ok; boolean_t sa_ok; uint64_t zplver = ZPL_VERSION; int error; zplver = zfs_zpl_version_map(spa_vers); fuids_ok = (zplver >= ZPL_VERSION_FUID); sa_ok = (zplver >= ZPL_VERSION_SA); error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, sa_ok, createprops, zplprops, is_ci); return (error); } /* * innvl: { * "type" -> dmu_objset_type_t (int32) * (optional) "props" -> { prop -> value } * (optional) "hidden_args" -> { "wkeydata" -> value } * raw uint8_t array of encryption wrapping key data (32 bytes) * } * * outnvl: propname -> error code (int32) */ static const zfs_ioc_key_t zfs_keys_create[] = { {"type", DATA_TYPE_INT32, 0}, {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL}, {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL}, }; static int zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { int error = 0; zfs_creat_t zct = { 0 }; nvlist_t *nvprops = NULL; nvlist_t *hidden_args = NULL; void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); dmu_objset_type_t type; boolean_t is_insensitive = B_FALSE; dsl_crypto_params_t *dcp = NULL; type = (dmu_objset_type_t)fnvlist_lookup_int32(innvl, "type"); (void) nvlist_lookup_nvlist(innvl, "props", &nvprops); (void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args); switch (type) { case DMU_OST_ZFS: cbfunc = zfs_create_cb; break; case DMU_OST_ZVOL: cbfunc = zvol_create_cb; break; default: cbfunc = NULL; break; } if (strchr(fsname, '@') || strchr(fsname, '%')) return (SET_ERROR(EINVAL)); zct.zct_props = nvprops; if (cbfunc == NULL) return (SET_ERROR(EINVAL)); if (type == DMU_OST_ZVOL) { uint64_t volsize, volblocksize; if (nvprops == NULL) return (SET_ERROR(EINVAL)); if (nvlist_lookup_uint64(nvprops, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0) return (SET_ERROR(EINVAL)); if ((error = nvlist_lookup_uint64(nvprops, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize)) != 0 && error != ENOENT) return (SET_ERROR(EINVAL)); if (error != 0) volblocksize = zfs_prop_default_numeric( ZFS_PROP_VOLBLOCKSIZE); if ((error = zvol_check_volblocksize(fsname, volblocksize)) != 0 || (error = zvol_check_volsize(volsize, volblocksize)) != 0) return (error); } else if (type == DMU_OST_ZFS) { int error; /* * We have to have normalization and * case-folding flags correct when we do the * file system creation, so go figure them out * now. */ VERIFY(nvlist_alloc(&zct.zct_zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); error = zfs_fill_zplprops(fsname, nvprops, zct.zct_zplprops, &is_insensitive); if (error != 0) { nvlist_free(zct.zct_zplprops); return (error); } } error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, nvprops, hidden_args, &dcp); if (error != 0) { nvlist_free(zct.zct_zplprops); return (error); } error = dmu_objset_create(fsname, type, is_insensitive ? DS_FLAG_CI_DATASET : 0, dcp, cbfunc, &zct); nvlist_free(zct.zct_zplprops); dsl_crypto_params_free(dcp, !!error); /* * It would be nice to do this atomically. */ if (error == 0) { error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL, nvprops, outnvl); if (error != 0) { spa_t *spa; int error2; /* * Volumes will return EBUSY and cannot be destroyed * until all asynchronous minor handling has completed. 
* Wait for the spa_zvol_taskq to drain then retry. */ error2 = dsl_destroy_head(fsname); while ((error2 == EBUSY) && (type == DMU_OST_ZVOL)) { error2 = spa_open(fsname, &spa, FTAG); if (error2 == 0) { taskq_wait(spa->spa_zvol_taskq); spa_close(spa, FTAG); } error2 = dsl_destroy_head(fsname); } } } return (error); } /* * innvl: { * "origin" -> name of origin snapshot * (optional) "props" -> { prop -> value } * (optional) "hidden_args" -> { "wkeydata" -> value } * raw uint8_t array of encryption wrapping key data (32 bytes) * } * * outputs: * outnvl: propname -> error code (int32) */ static const zfs_ioc_key_t zfs_keys_clone[] = { {"origin", DATA_TYPE_STRING, 0}, {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL}, {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL}, }; static int zfs_ioc_clone(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { int error = 0; nvlist_t *nvprops = NULL; char *origin_name; origin_name = fnvlist_lookup_string(innvl, "origin"); (void) nvlist_lookup_nvlist(innvl, "props", &nvprops); if (strchr(fsname, '@') || strchr(fsname, '%')) return (SET_ERROR(EINVAL)); if (dataset_namecheck(origin_name, NULL, NULL) != 0) return (SET_ERROR(EINVAL)); error = dmu_objset_clone(fsname, origin_name); /* * It would be nice to do this atomically. */ if (error == 0) { error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL, nvprops, outnvl); if (error != 0) (void) dsl_destroy_head(fsname); } return (error); } static const zfs_ioc_key_t zfs_keys_remap[] = { /* no nvl keys */ }; /* ARGSUSED */ static int zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { if (strchr(fsname, '@') || strchr(fsname, '%')) return (SET_ERROR(EINVAL)); return (dmu_objset_remap_indirects(fsname)); } /* * innvl: { * "snaps" -> { snapshot1, snapshot2 } * (optional) "props" -> { prop -> value (string) } * } * * outnvl: snapshot -> error code (int32) */ static const zfs_ioc_key_t zfs_keys_snapshot[] = { {"snaps", DATA_TYPE_NVLIST, 0}, {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL}, }; static int zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { nvlist_t *snaps; nvlist_t *props = NULL; int error, poollen; nvpair_t *pair; (void) nvlist_lookup_nvlist(innvl, "props", &props); if ((error = zfs_check_userprops(poolname, props)) != 0) return (error); if (!nvlist_empty(props) && zfs_earlier_version(poolname, SPA_VERSION_SNAP_PROPS)) return (SET_ERROR(ENOTSUP)); snaps = fnvlist_lookup_nvlist(innvl, "snaps"); poollen = strlen(poolname); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) { const char *name = nvpair_name(pair); const char *cp = strchr(name, '@'); /* * The snap name must contain an @, and the part after it must * contain only valid characters. */ if (cp == NULL || zfs_component_namecheck(cp + 1, NULL, NULL) != 0) return (SET_ERROR(EINVAL)); /* * The snap must be in the specified pool. */ if (strncmp(name, poolname, poollen) != 0 || (name[poollen] != '/' && name[poollen] != '@')) return (SET_ERROR(EXDEV)); /* This must be the only snap of this fs. 
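* * (The strncmp() below compares through the '@' -- cp - name + 1 bytes -- * so a second snapshot request for the same filesystem is caught and * rejected with EXDEV, the same error used for cross-pool requests.)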
*/ for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair); pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) { if (strncmp(name, nvpair_name(pair2), cp - name + 1) == 0) { return (SET_ERROR(EXDEV)); } } } error = dsl_dataset_snapshot(snaps, props, outnvl); return (error); } /* * innvl: "message" -> string */ static const zfs_ioc_key_t zfs_keys_log_history[] = { {"message", DATA_TYPE_STRING, 0}, }; /* ARGSUSED */ static int zfs_ioc_log_history(const char *unused, nvlist_t *innvl, nvlist_t *outnvl) { char *message; spa_t *spa; int error; char *poolname; /* * The poolname in the ioctl is not set; we get it from the TSD, * which was set at the end of the last successful ioctl that allows * logging. The secpolicy func already checked that it is set. * Only one log ioctl is allowed after each successful ioctl, so * we clear the TSD here. */ poolname = tsd_get(zfs_allow_log_key); if (poolname == NULL) return (SET_ERROR(EINVAL)); (void) tsd_set(zfs_allow_log_key, NULL); error = spa_open(poolname, &spa, FTAG); strfree(poolname); if (error != 0) return (error); message = fnvlist_lookup_string(innvl, "message"); if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } error = spa_history_log(spa, message); spa_close(spa, FTAG); return (error); } /* * The dp_config_rwlock must not be held when calling this, because the * unmount may need to write out data. * * This function is best-effort. Callers must deal gracefully if it * remains mounted (or is remounted after this call). */ void zfs_unmount_snap(const char *snapname) { if (strchr(snapname, '@') == NULL) return; (void) zfsctl_snapshot_unmount((char *)snapname, MNT_FORCE); } /* ARGSUSED */ static int zfs_unmount_snap_cb(const char *snapname, void *arg) { zfs_unmount_snap(snapname); return (0); } /* * When a clone is destroyed, its origin may also need to be destroyed, * in which case it must be unmounted. This routine will do that unmount * if necessary. */ void zfs_destroy_unmount_origin(const char *fsname) { int error; objset_t *os; dsl_dataset_t *ds; error = dmu_objset_hold(fsname, FTAG, &os); if (error != 0) return; ds = dmu_objset_ds(os); if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev)) { char originname[ZFS_MAX_DATASET_NAME_LEN]; dsl_dataset_name(ds->ds_prev, originname); dmu_objset_rele(os, FTAG); zfs_unmount_snap(originname); } else { dmu_objset_rele(os, FTAG); } } /* * innvl: { * "snaps" -> { snapshot1, snapshot2 } * (optional boolean) "defer" * } * * outnvl: snapshot -> error code (int32) */ static const zfs_ioc_key_t zfs_keys_destroy_snaps[] = { {"snaps", DATA_TYPE_NVLIST, 0}, {"defer", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_destroy_snaps(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { nvlist_t *snaps; nvpair_t *pair; boolean_t defer; snaps = fnvlist_lookup_nvlist(innvl, "snaps"); defer = nvlist_exists(innvl, "defer"); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) { zfs_unmount_snap(nvpair_name(pair)); } return (dsl_destroy_snapshots_nvl(snaps, defer, outnvl)); } /* * Create bookmarks. Bookmark names are of the form <fs>#<bmark>. * All bookmarks must be in the same pool.
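* * For example (names illustrative), an innvl of * { "pool/fs#mybm" -> "pool/fs@snap" } * creates bookmark "mybm" on "pool/fs" from its snapshot "snap".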
* * innvl: { * bookmark1 -> snapshot1, bookmark2 -> snapshot2 * } * * outnvl: bookmark -> error code (int32) * */ static const zfs_ioc_key_t zfs_keys_bookmark[] = { {"...", DATA_TYPE_STRING, ZK_WILDCARDLIST}, }; /* ARGSUSED */ static int zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { char *snap_name; /* * Verify the snapshot argument. */ if (nvpair_value_string(pair, &snap_name) != 0) return (SET_ERROR(EINVAL)); /* Verify that the keys (bookmarks) are unique */ for (nvpair_t *pair2 = nvlist_next_nvpair(innvl, pair); pair2 != NULL; pair2 = nvlist_next_nvpair(innvl, pair2)) { if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0) return (SET_ERROR(EINVAL)); } } return (dsl_bookmark_create(innvl, outnvl)); } /* * innvl: { * property 1, property 2, ... * } * * outnvl: { * bookmark name 1 -> { property 1, property 2, ... }, * bookmark name 2 -> { property 1, property 2, ... } * } * */ static const zfs_ioc_key_t zfs_keys_get_bookmarks[] = { {"...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST | ZK_OPTIONAL}, }; static int zfs_ioc_get_bookmarks(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { return (dsl_get_bookmarks(fsname, innvl, outnvl)); } /* * innvl: { * bookmark name 1, bookmark name 2 * } * * outnvl: bookmark -> error code (int32) * */ static const zfs_ioc_key_t zfs_keys_destroy_bookmarks[] = { {"...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST}, }; static int zfs_ioc_destroy_bookmarks(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { int error, poollen; poollen = strlen(poolname); for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { const char *name = nvpair_name(pair); const char *cp = strchr(name, '#'); /* * The bookmark name must contain a '#', and the part after it * must contain only valid characters. */ if (cp == NULL || zfs_component_namecheck(cp + 1, NULL, NULL) != 0) return (SET_ERROR(EINVAL)); /* * The bookmark must be in the specified pool.
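* * (The prefix check alone is not sufficient: also requiring '/' or '#' * immediately after the pool name prevents a bookmark on, say, "tankX" * from passing for pool "tank".)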
*/ if (strncmp(name, poolname, poollen) != 0 || (name[poollen] != '/' && name[poollen] != '#')) return (SET_ERROR(EXDEV)); } error = dsl_bookmark_destroy(innvl, outnvl); return (error); } static const zfs_ioc_key_t zfs_keys_channel_program[] = { {"program", DATA_TYPE_STRING, 0}, {"arg", DATA_TYPE_ANY, 0}, {"sync", DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL}, {"instrlimit", DATA_TYPE_UINT64, ZK_OPTIONAL}, {"memlimit", DATA_TYPE_UINT64, ZK_OPTIONAL}, }; static int zfs_ioc_channel_program(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { char *program; uint64_t instrlimit, memlimit; boolean_t sync_flag; nvpair_t *nvarg = NULL; program = fnvlist_lookup_string(innvl, ZCP_ARG_PROGRAM); if (0 != nvlist_lookup_boolean_value(innvl, ZCP_ARG_SYNC, &sync_flag)) { sync_flag = B_TRUE; } if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_INSTRLIMIT, &instrlimit)) { instrlimit = ZCP_DEFAULT_INSTRLIMIT; } if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_MEMLIMIT, &memlimit)) { memlimit = ZCP_DEFAULT_MEMLIMIT; } nvarg = fnvlist_lookup_nvpair(innvl, ZCP_ARG_ARGLIST); if (instrlimit == 0 || instrlimit > zfs_lua_max_instrlimit) return (EINVAL); if (memlimit == 0 || memlimit > zfs_lua_max_memlimit) return (EINVAL); return (zcp_eval(poolname, program, sync_flag, instrlimit, memlimit, nvarg, outnvl)); } /* * innvl: unused * outnvl: empty */ static const zfs_ioc_key_t zfs_keys_pool_checkpoint[] = { /* no nvl keys */ }; /* ARGSUSED */ static int zfs_ioc_pool_checkpoint(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { return (spa_checkpoint(poolname)); } /* * innvl: unused * outnvl: empty */ static const zfs_ioc_key_t zfs_keys_pool_discard_checkpoint[] = { /* no nvl keys */ }; /* ARGSUSED */ static int zfs_ioc_pool_discard_checkpoint(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { return (spa_checkpoint_discard(poolname)); } /* * inputs: * zc_name name of dataset to destroy * zc_defer_destroy mark for deferred destroy * * outputs: none */ static int zfs_ioc_destroy(zfs_cmd_t *zc) { objset_t *os; dmu_objset_type_t ost; int err; err = dmu_objset_hold(zc->zc_name, FTAG, &os); if (err != 0) return (err); ost = dmu_objset_type(os); dmu_objset_rele(os, FTAG); if (ost == DMU_OST_ZFS) zfs_unmount_snap(zc->zc_name); if (strchr(zc->zc_name, '@')) { err = dsl_destroy_snapshot(zc->zc_name, zc->zc_defer_destroy); } else { err = dsl_destroy_head(zc->zc_name); if (err == EEXIST) { /* * It is possible that the given DS may have * hidden child (%recv) datasets - "leftovers" * resulting from the previously interrupted * 'zfs receive'. * * 6 extra bytes for /%recv */ char namebuf[ZFS_MAX_DATASET_NAME_LEN + 6]; if (snprintf(namebuf, sizeof (namebuf), "%s/%s", zc->zc_name, recv_clone_name) >= sizeof (namebuf)) return (SET_ERROR(EINVAL)); /* * Try to remove the hidden child (%recv) and after * that try to remove the target dataset. * If the hidden child (%recv) does not exist * the original error (EEXIST) will be returned */ err = dsl_destroy_head(namebuf); if (err == 0) err = dsl_destroy_head(zc->zc_name); else if (err == ENOENT) err = SET_ERROR(EEXIST); } } return (err); } /* * innvl: { * "initialize_command" -> POOL_INITIALIZE_{CANCEL|DO|SUSPEND} (uint64) * "initialize_vdevs": { -> guids to initialize (nvlist) * "vdev_path_1": vdev_guid_1, (uint64), * "vdev_path_2": vdev_guid_2, (uint64), * ... * }, * } * * outnvl: { * "initialize_vdevs": { -> initialization errors (nvlist) * "vdev_path_1": errno, see function body for possible errnos (uint64) * "vdev_path_2": errno, ... (uint64) * ... 
* } * } * * EINVAL is returned for an unknown command or if any of the provided vdev * guids has been specified with a type other than uint64. */ static const zfs_ioc_key_t zfs_keys_pool_initialize[] = { {ZPOOL_INITIALIZE_COMMAND, DATA_TYPE_UINT64, 0}, {ZPOOL_INITIALIZE_VDEVS, DATA_TYPE_NVLIST, 0} }; static int zfs_ioc_pool_initialize(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) { uint64_t cmd_type; if (nvlist_lookup_uint64(innvl, ZPOOL_INITIALIZE_COMMAND, &cmd_type) != 0) { return (SET_ERROR(EINVAL)); } if (!(cmd_type == POOL_INITIALIZE_CANCEL || cmd_type == POOL_INITIALIZE_DO || cmd_type == POOL_INITIALIZE_SUSPEND)) { return (SET_ERROR(EINVAL)); } nvlist_t *vdev_guids; if (nvlist_lookup_nvlist(innvl, ZPOOL_INITIALIZE_VDEVS, &vdev_guids) != 0) { return (SET_ERROR(EINVAL)); } for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL); pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) { uint64_t vdev_guid; if (nvpair_value_uint64(pair, &vdev_guid) != 0) { return (SET_ERROR(EINVAL)); } } spa_t *spa; int error = spa_open(poolname, &spa, FTAG); if (error != 0) return (error); nvlist_t *vdev_errlist = fnvlist_alloc(); int total_errors = spa_vdev_initialize(spa, vdev_guids, cmd_type, vdev_errlist); if (fnvlist_size(vdev_errlist) > 0) { fnvlist_add_nvlist(outnvl, ZPOOL_INITIALIZE_VDEVS, vdev_errlist); } fnvlist_free(vdev_errlist); spa_close(spa, FTAG); return (total_errors > 0 ? EINVAL : 0); } /* * fsname is name of dataset to rollback (to most recent snapshot) * * innvl may contain name of expected target snapshot * * outnvl: "target" -> name of most recent snapshot */ static const zfs_ioc_key_t zfs_keys_rollback[] = { {"target", DATA_TYPE_STRING, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { zfsvfs_t *zfsvfs; zvol_state_t *zv; char *target = NULL; int error; (void) nvlist_lookup_string(innvl, "target", &target); if (target != NULL) { const char *cp = strchr(target, '@'); /* * The snap name must contain an @, and the part after it must * contain only valid characters. */ if (cp == NULL || zfs_component_namecheck(cp + 1, NULL, NULL) != 0) return (SET_ERROR(EINVAL)); } if (getzfsvfs(fsname, &zfsvfs) == 0) { dsl_dataset_t *ds; ds = dmu_objset_ds(zfsvfs->z_os); error = zfs_suspend_fs(zfsvfs); if (error == 0) { int resume_err; error = dsl_dataset_rollback(fsname, target, zfsvfs, outnvl); resume_err = zfs_resume_fs(zfsvfs, ds); error = error ?
error : resume_err; } deactivate_super(zfsvfs->z_sb); } else if ((zv = zvol_suspend(fsname)) != NULL) { error = dsl_dataset_rollback(fsname, target, zvol_tag(zv), outnvl); zvol_resume(zv); } else { error = dsl_dataset_rollback(fsname, target, NULL, outnvl); } return (error); } static int recursive_unmount(const char *fsname, void *arg) { const char *snapname = arg; char *fullname; fullname = kmem_asprintf("%s@%s", fsname, snapname); zfs_unmount_snap(fullname); strfree(fullname); return (0); } /* * inputs: * zc_name old name of dataset * zc_value new name of dataset * zc_cookie recursive flag (only valid for snapshots) * * outputs: none */ static int zfs_ioc_rename(zfs_cmd_t *zc) { objset_t *os; dmu_objset_type_t ost; boolean_t recursive = zc->zc_cookie & 1; char *at; int err; /* "zfs rename" from and to ...%recv datasets should both fail */ zc->zc_name[sizeof (zc->zc_name) - 1] = '\0'; zc->zc_value[sizeof (zc->zc_value) - 1] = '\0'; if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 || dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || strchr(zc->zc_name, '%') || strchr(zc->zc_value, '%')) return (SET_ERROR(EINVAL)); err = dmu_objset_hold(zc->zc_name, FTAG, &os); if (err != 0) return (err); ost = dmu_objset_type(os); dmu_objset_rele(os, FTAG); at = strchr(zc->zc_name, '@'); if (at != NULL) { /* snaps must be in same fs */ int error; if (strncmp(zc->zc_name, zc->zc_value, at - zc->zc_name + 1)) return (SET_ERROR(EXDEV)); *at = '\0'; if (ost == DMU_OST_ZFS) { error = dmu_objset_find(zc->zc_name, recursive_unmount, at + 1, recursive ? DS_FIND_CHILDREN : 0); if (error != 0) { *at = '@'; return (error); } } error = dsl_dataset_rename_snapshot(zc->zc_name, at + 1, strchr(zc->zc_value, '@') + 1, recursive); *at = '@'; return (error); } else { return (dsl_dir_rename(zc->zc_name, zc->zc_value)); } } static int zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr) { const char *propname = nvpair_name(pair); boolean_t issnap = (strchr(dsname, '@') != NULL); zfs_prop_t prop = zfs_name_to_prop(propname); uint64_t intval; int err; if (prop == ZPROP_INVAL) { if (zfs_prop_user(propname)) { if ((err = zfs_secpolicy_write_perms(dsname, ZFS_DELEG_PERM_USERPROP, cr))) return (err); return (0); } if (!issnap && zfs_prop_userquota(propname)) { const char *perm = NULL; const char *uq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA]; const char *gq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA]; const char *uiq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA]; const char *giq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA]; const char *pq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA]; const char *piq_prefix = zfs_userquota_prop_prefixes[\ ZFS_PROP_PROJECTOBJQUOTA]; if (strncmp(propname, uq_prefix, strlen(uq_prefix)) == 0) { perm = ZFS_DELEG_PERM_USERQUOTA; } else if (strncmp(propname, uiq_prefix, strlen(uiq_prefix)) == 0) { perm = ZFS_DELEG_PERM_USEROBJQUOTA; } else if (strncmp(propname, gq_prefix, strlen(gq_prefix)) == 0) { perm = ZFS_DELEG_PERM_GROUPQUOTA; } else if (strncmp(propname, giq_prefix, strlen(giq_prefix)) == 0) { perm = ZFS_DELEG_PERM_GROUPOBJQUOTA; } else if (strncmp(propname, pq_prefix, strlen(pq_prefix)) == 0) { perm = ZFS_DELEG_PERM_PROJECTQUOTA; } else if (strncmp(propname, piq_prefix, strlen(piq_prefix)) == 0) { perm = ZFS_DELEG_PERM_PROJECTOBJQUOTA; } else { /* {USER|GROUP|PROJECT}USED are read-only */ return (SET_ERROR(EINVAL)); } if ((err = zfs_secpolicy_write_perms(dsname, perm, cr))) return (err); return (0); } return 
(SET_ERROR(EINVAL)); } if (issnap) return (SET_ERROR(EINVAL)); if (nvpair_type(pair) == DATA_TYPE_NVLIST) { /* * dsl_prop_get_all_impl() returns properties in this * format. */ nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) == 0); } /* * Check that this value is valid for this pool version */ switch (prop) { case ZFS_PROP_COMPRESSION: /* * If the user specified gzip compression, make sure * the SPA supports it. We ignore any errors here since * we'll catch them later. */ if (nvpair_value_uint64(pair, &intval) == 0) { if (intval >= ZIO_COMPRESS_GZIP_1 && intval <= ZIO_COMPRESS_GZIP_9 && zfs_earlier_version(dsname, SPA_VERSION_GZIP_COMPRESSION)) { return (SET_ERROR(ENOTSUP)); } if (intval == ZIO_COMPRESS_ZLE && zfs_earlier_version(dsname, SPA_VERSION_ZLE_COMPRESSION)) return (SET_ERROR(ENOTSUP)); if (intval == ZIO_COMPRESS_LZ4) { spa_t *spa; if ((err = spa_open(dsname, &spa, FTAG)) != 0) return (err); if (!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS)) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } spa_close(spa, FTAG); } /* * If this is a bootable dataset then * verify that the compression algorithm * is supported for booting. We must return * something other than ENOTSUP since it * implies a downrev pool version. */ if (zfs_is_bootfs(dsname) && !BOOTFS_COMPRESS_VALID(intval)) { return (SET_ERROR(ERANGE)); } } break; case ZFS_PROP_COPIES: if (zfs_earlier_version(dsname, SPA_VERSION_DITTO_BLOCKS)) return (SET_ERROR(ENOTSUP)); break; case ZFS_PROP_VOLBLOCKSIZE: case ZFS_PROP_RECORDSIZE: /* Record sizes above 128k need the feature to be enabled */ if (nvpair_value_uint64(pair, &intval) == 0 && intval > SPA_OLD_MAXBLOCKSIZE) { spa_t *spa; /* * We don't allow setting the property above 1MB, * unless the tunable has been changed. */ if (intval > zfs_max_recordsize || intval > SPA_MAXBLOCKSIZE) return (SET_ERROR(ERANGE)); if ((err = spa_open(dsname, &spa, FTAG)) != 0) return (err); if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } spa_close(spa, FTAG); } break; case ZFS_PROP_DNODESIZE: /* Dnode sizes above 512 need the feature to be enabled */ if (nvpair_value_uint64(pair, &intval) == 0 && intval != ZFS_DNSIZE_LEGACY) { spa_t *spa; /* * If this is a bootable dataset then * we don't allow large (>512B) dnodes, * because GRUB doesn't support them. */ if (zfs_is_bootfs(dsname) && intval != ZFS_DNSIZE_LEGACY) { return (SET_ERROR(EDOM)); } if ((err = spa_open(dsname, &spa, FTAG)) != 0) return (err); if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } spa_close(spa, FTAG); } break; case ZFS_PROP_SPECIAL_SMALL_BLOCKS: /* * This property could require the allocation classes * feature to be active for setting, however we allow * it so that tests of settable properties succeed. * The CLI will issue a warning in this case. 
*/ break; case ZFS_PROP_SHARESMB: if (zpl_earlier_version(dsname, ZPL_VERSION_FUID)) return (SET_ERROR(ENOTSUP)); break; case ZFS_PROP_ACLINHERIT: if (nvpair_type(pair) == DATA_TYPE_UINT64 && nvpair_value_uint64(pair, &intval) == 0) { if (intval == ZFS_ACL_PASSTHROUGH_X && zfs_earlier_version(dsname, SPA_VERSION_PASSTHROUGH_X)) return (SET_ERROR(ENOTSUP)); } break; case ZFS_PROP_CHECKSUM: case ZFS_PROP_DEDUP: { spa_feature_t feature; spa_t *spa; int err; /* dedup feature version checks */ if (prop == ZFS_PROP_DEDUP && zfs_earlier_version(dsname, SPA_VERSION_DEDUP)) return (SET_ERROR(ENOTSUP)); if (nvpair_type(pair) == DATA_TYPE_UINT64 && nvpair_value_uint64(pair, &intval) == 0) { /* check prop value is enabled in features */ feature = zio_checksum_to_feature( intval & ZIO_CHECKSUM_MASK); if (feature == SPA_FEATURE_NONE) break; if ((err = spa_open(dsname, &spa, FTAG)) != 0) return (err); if (!spa_feature_is_enabled(spa, feature)) { spa_close(spa, FTAG); return (SET_ERROR(ENOTSUP)); } spa_close(spa, FTAG); } break; } default: break; } return (zfs_secpolicy_setprop(dsname, prop, pair, CRED())); } /* * Removes properties from the given props list that fail permission checks * needed to clear them and to restore them in case of a receive error. For each * property, make sure we have both set and inherit permissions. * * Returns the first error encountered if any permission checks fail. If the * caller provides a non-NULL errlist, it also gives the complete list of names * of all the properties that failed a permission check along with the * corresponding error numbers. The caller is responsible for freeing the * returned errlist. * * If every property checks out successfully, zero is returned and the list * pointed at by errlist is NULL. */ static int zfs_check_clearable(char *dataset, nvlist_t *props, nvlist_t **errlist) { zfs_cmd_t *zc; nvpair_t *pair, *next_pair; nvlist_t *errors; int err, rv = 0; if (props == NULL) return (0); VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0); zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP); (void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name)); pair = nvlist_next_nvpair(props, NULL); while (pair != NULL) { next_pair = nvlist_next_nvpair(props, pair); (void) strlcpy(zc->zc_value, nvpair_name(pair), sizeof (zc->zc_value)); if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 || (err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) { VERIFY(nvlist_remove_nvpair(props, pair) == 0); VERIFY(nvlist_add_int32(errors, zc->zc_value, err) == 0); } pair = next_pair; } kmem_free(zc, sizeof (zfs_cmd_t)); if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) { nvlist_free(errors); errors = NULL; } else { VERIFY(nvpair_value_int32(pair, &rv) == 0); } if (errlist == NULL) nvlist_free(errors); else *errlist = errors; return (rv); } static boolean_t propval_equals(nvpair_t *p1, nvpair_t *p2) { if (nvpair_type(p1) == DATA_TYPE_NVLIST) { /* dsl_prop_get_all_impl() format */ nvlist_t *attrs; VERIFY(nvpair_value_nvlist(p1, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p1) == 0); } if (nvpair_type(p2) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(p2, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p2) == 0); } if (nvpair_type(p1) != nvpair_type(p2)) return (B_FALSE); if (nvpair_type(p1) == DATA_TYPE_STRING) { char *valstr1, *valstr2; VERIFY(nvpair_value_string(p1, (char **)&valstr1) == 0); VERIFY(nvpair_value_string(p2, (char **)&valstr2) == 0); return (strcmp(valstr1, valstr2) == 0); } else { 
uint64_t intval1, intval2; VERIFY(nvpair_value_uint64(p1, &intval1) == 0); VERIFY(nvpair_value_uint64(p2, &intval2) == 0); return (intval1 == intval2); } } /* * Remove properties from props if they are not going to change (as determined * by comparison with origprops). Remove them from origprops as well, since we * do not need to clear or restore properties that won't change. */ static void props_reduce(nvlist_t *props, nvlist_t *origprops) { nvpair_t *pair, *next_pair; if (origprops == NULL) return; /* all props need to be received */ pair = nvlist_next_nvpair(props, NULL); while (pair != NULL) { const char *propname = nvpair_name(pair); nvpair_t *match; next_pair = nvlist_next_nvpair(props, pair); if ((nvlist_lookup_nvpair(origprops, propname, &match) != 0) || !propval_equals(pair, match)) goto next; /* need to set received value */ /* don't clear the existing received value */ (void) nvlist_remove_nvpair(origprops, match); /* don't bother receiving the property */ (void) nvlist_remove_nvpair(props, pair); next: pair = next_pair; } } /* * Extract properties that cannot be set PRIOR to the receipt of a dataset. * For example, refquota cannot be set until after the receipt of a dataset, * because in replication streams, an older/earlier snapshot may exceed the * refquota. We want to receive the older/earlier snapshot, but setting * refquota pre-receipt will set the dsl's ACTUAL quota, which will prevent * the older/earlier snapshot from being received (with EDQUOT). * * The ZFS test "zfs_receive_011_pos" demonstrates such a scenario. * * libzfs will need to be judicious in handling errors encountered by props * extracted by this function. */ static nvlist_t * extract_delay_props(nvlist_t *props) { nvlist_t *delayprops; nvpair_t *nvp, *tmp; static const zfs_prop_t delayable[] = { ZFS_PROP_REFQUOTA, ZFS_PROP_KEYLOCATION, 0 }; int i; VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL; nvp = nvlist_next_nvpair(props, nvp)) { /* * strcmp() is safe because zfs_prop_to_name() always returns * a bounded string. */ for (i = 0; delayable[i] != 0; i++) { if (strcmp(zfs_prop_to_name(delayable[i]), nvpair_name(nvp)) == 0) { break; } } if (delayable[i] != 0) { tmp = nvlist_prev_nvpair(props, nvp); VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0); VERIFY(nvlist_remove_nvpair(props, nvp) == 0); nvp = tmp; } } if (nvlist_empty(delayprops)) { nvlist_free(delayprops); delayprops = NULL; } return (delayprops); } #ifdef DEBUG static boolean_t zfs_ioc_recv_inject_err; #endif /* * nvlist 'errors' is always allocated. It will contain descriptions of * encountered errors, if any. It's the caller's responsibility to free.
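* * On failure, this function tries to roll the dataset's properties back to * their pre-receive state; *errflags reports ZPROP_ERR_NOCLEAR or * ZPROP_ERR_NORESTORE when that cleanup is itself incomplete.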
*/ static int zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, nvlist_t *recvprops, nvlist_t *localprops, nvlist_t *hidden_args, boolean_t force, boolean_t resumable, int input_fd, dmu_replay_record_t *begin_record, int cleanup_fd, uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle, nvlist_t **errors) { dmu_recv_cookie_t drc; int error = 0; int props_error = 0; offset_t off; nvlist_t *local_delayprops = NULL; nvlist_t *recv_delayprops = NULL; nvlist_t *origprops = NULL; /* existing properties */ nvlist_t *origrecvd = NULL; /* existing received properties */ boolean_t first_recvd_props = B_FALSE; file_t *input_fp; *read_bytes = 0; *errflags = 0; *errors = fnvlist_alloc(); input_fp = getf(input_fd); if (input_fp == NULL) return (SET_ERROR(EBADF)); error = dmu_recv_begin(tofs, tosnap, begin_record, force, resumable, localprops, hidden_args, origin, &drc); if (error != 0) goto out; /* * Set properties before we receive the stream so that they are applied * to the new data. Note that we must call dmu_recv_stream() if * dmu_recv_begin() succeeds. */ if (recvprops != NULL && !drc.drc_newfs) { if (spa_version(dsl_dataset_get_spa(drc.drc_ds)) >= SPA_VERSION_RECVD_PROPS && !dsl_prop_get_hasrecvd(tofs)) first_recvd_props = B_TRUE; /* * If new received properties are supplied, they are to * completely replace the existing received properties, * so stash away the existing ones. */ if (dsl_prop_get_received(tofs, &origrecvd) == 0) { nvlist_t *errlist = NULL; /* * Don't bother writing a property if its value won't * change (and avoid the unnecessary security checks). * * The first receive after SPA_VERSION_RECVD_PROPS is a * special case where we blow away all local properties * regardless. */ if (!first_recvd_props) props_reduce(recvprops, origrecvd); if (zfs_check_clearable(tofs, origrecvd, &errlist) != 0) (void) nvlist_merge(*errors, errlist, 0); nvlist_free(errlist); if (clear_received_props(tofs, origrecvd, first_recvd_props ? NULL : recvprops) != 0) *errflags |= ZPROP_ERR_NOCLEAR; } else { *errflags |= ZPROP_ERR_NOCLEAR; } } /* * Stash away existing properties so we can restore them on error unless * we're doing the first receive after SPA_VERSION_RECVD_PROPS, in which * case "origrecvd" will take care of that. 
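* * (Note the split: "origrecvd" above holds only received-source values, * while "origprops" below snapshots all existing properties via * dsl_prop_get_all() so that -o/-x overrides can be undone on error.)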
*/ if (localprops != NULL && !drc.drc_newfs && !first_recvd_props) { objset_t *os; if (dmu_objset_hold(tofs, FTAG, &os) == 0) { if (dsl_prop_get_all(os, &origprops) != 0) { *errflags |= ZPROP_ERR_NOCLEAR; } dmu_objset_rele(os, FTAG); } else { *errflags |= ZPROP_ERR_NOCLEAR; } } if (recvprops != NULL) { props_error = dsl_prop_set_hasrecvd(tofs); if (props_error == 0) { recv_delayprops = extract_delay_props(recvprops); (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED, recvprops, *errors); } } if (localprops != NULL) { nvlist_t *oprops = fnvlist_alloc(); nvlist_t *xprops = fnvlist_alloc(); nvpair_t *nvp = NULL; while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) { if (nvpair_type(nvp) == DATA_TYPE_BOOLEAN) { /* -x property */ const char *name = nvpair_name(nvp); zfs_prop_t prop = zfs_name_to_prop(name); if (prop != ZPROP_INVAL) { if (!zfs_prop_inheritable(prop)) continue; } else if (!zfs_prop_user(name)) continue; fnvlist_add_boolean(xprops, name); } else { /* -o property=value */ fnvlist_add_nvpair(oprops, nvp); } } local_delayprops = extract_delay_props(oprops); (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, oprops, *errors); (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED, xprops, *errors); nvlist_free(oprops); nvlist_free(xprops); } off = input_fp->f_offset; error = dmu_recv_stream(&drc, input_fp->f_vnode, &off, cleanup_fd, action_handle); if (error == 0) { zfsvfs_t *zfsvfs = NULL; zvol_state_t *zv = NULL; if (getzfsvfs(tofs, &zfsvfs) == 0) { /* online recv */ dsl_dataset_t *ds; int end_err; ds = dmu_objset_ds(zfsvfs->z_os); error = zfs_suspend_fs(zfsvfs); /* * If the suspend fails, then the recv_end will * likely also fail, and clean up after itself. */ end_err = dmu_recv_end(&drc, zfsvfs); if (error == 0) error = zfs_resume_fs(zfsvfs, ds); error = error ? error : end_err; deactivate_super(zfsvfs->z_sb); } else if ((zv = zvol_suspend(tofs)) != NULL) { error = dmu_recv_end(&drc, zvol_tag(zv)); zvol_resume(zv); } else { error = dmu_recv_end(&drc, NULL); } /* Set delayed properties now, after we're done receiving. */ if (recv_delayprops != NULL && error == 0) { (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED, recv_delayprops, *errors); } if (local_delayprops != NULL && error == 0) { (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, local_delayprops, *errors); } } /* * Merge delayed props back in with initial props, in case * we're DEBUG and zfs_ioc_recv_inject_err is set (which means * we have to make sure clear_received_props() includes * the delayed properties). * * Since zfs_ioc_recv_inject_err is only in DEBUG kernels, * using ASSERT() will be just like a VERIFY. */ if (recv_delayprops != NULL) { ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0); nvlist_free(recv_delayprops); } if (local_delayprops != NULL) { ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0); nvlist_free(local_delayprops); } *read_bytes = off - input_fp->f_offset; if (VOP_SEEK(input_fp->f_vnode, input_fp->f_offset, &off, NULL) == 0) input_fp->f_offset = off; #ifdef DEBUG if (zfs_ioc_recv_inject_err) { zfs_ioc_recv_inject_err = B_FALSE; error = 1; } #endif /* * On error, restore the original props. */ if (error != 0 && recvprops != NULL && !drc.drc_newfs) { if (clear_received_props(tofs, recvprops, NULL) != 0) { /* * We failed to clear the received properties. * Since we may have left a $recvd value on the * system, we can't clear the $hasrecvd flag. 
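* * (ZPROP_ERR_NORESTORE, set below, tells the caller that the pre-receive * property state could not be fully restored.)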
*/ *errflags |= ZPROP_ERR_NORESTORE; } else if (first_recvd_props) { dsl_prop_unset_hasrecvd(tofs); } if (origrecvd == NULL && !drc.drc_newfs) { /* We failed to stash the original properties. */ *errflags |= ZPROP_ERR_NORESTORE; } /* * dsl_props_set() will not convert RECEIVED to LOCAL on or * after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL * explicitly if we're restoring local properties cleared in the * first new-style receive. */ if (origrecvd != NULL && zfs_set_prop_nvlist(tofs, (first_recvd_props ? ZPROP_SRC_LOCAL : ZPROP_SRC_RECEIVED), origrecvd, NULL) != 0) { /* * We stashed the original properties but failed to * restore them. */ *errflags |= ZPROP_ERR_NORESTORE; } } if (error != 0 && localprops != NULL && !drc.drc_newfs && !first_recvd_props) { nvlist_t *setprops; nvlist_t *inheritprops; nvpair_t *nvp; if (origprops == NULL) { /* We failed to stash the original properties. */ *errflags |= ZPROP_ERR_NORESTORE; goto out; } /* Restore original props */ setprops = fnvlist_alloc(); inheritprops = fnvlist_alloc(); nvp = NULL; while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) { const char *name = nvpair_name(nvp); const char *source; nvlist_t *attrs; if (!nvlist_exists(origprops, name)) { /* * Property was not present or was explicitly * inherited before the receive, restore this. */ fnvlist_add_boolean(inheritprops, name); continue; } attrs = fnvlist_lookup_nvlist(origprops, name); source = fnvlist_lookup_string(attrs, ZPROP_SOURCE); /* Skip received properties */ if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) continue; if (strcmp(source, tofs) == 0) { /* Property was locally set */ fnvlist_add_nvlist(setprops, name, attrs); } else { /* Property was implicitly inherited */ fnvlist_add_boolean(inheritprops, name); } } if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, setprops, NULL) != 0) *errflags |= ZPROP_ERR_NORESTORE; if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED, inheritprops, NULL) != 0) *errflags |= ZPROP_ERR_NORESTORE; nvlist_free(setprops); nvlist_free(inheritprops); } out: releasef(input_fd); nvlist_free(origrecvd); nvlist_free(origprops); if (error == 0) error = props_error; return (error); } /* * inputs: * zc_name name of containing filesystem (unused) * zc_nvlist_src{_size} nvlist of properties to apply * zc_nvlist_conf{_size} nvlist of properties to exclude * (DATA_TYPE_BOOLEAN) and override (everything else) * zc_value name of snapshot to create * zc_string name of clone origin (if DRR_FLAG_CLONE) * zc_cookie file descriptor to recv from * zc_begin_record the BEGIN record of the stream (not byteswapped) * zc_guid force flag * zc_cleanup_fd cleanup-on-exit file descriptor * zc_action_handle handle for this guid/ds mapping (or zero on first call) * * outputs: * zc_cookie number of bytes read * zc_obj zprop_errflags_t * zc_action_handle handle for this guid/ds mapping * zc_nvlist_dst{_size} error for each unapplied received property */ static int zfs_ioc_recv(zfs_cmd_t *zc) { dmu_replay_record_t begin_record; nvlist_t *errors = NULL; nvlist_t *recvdprops = NULL; nvlist_t *localprops = NULL; char *origin = NULL; char *tosnap; char tofs[ZFS_MAX_DATASET_NAME_LEN]; int error = 0; if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || strchr(zc->zc_value, '@') == NULL || strchr(zc->zc_value, '%')) return (SET_ERROR(EINVAL)); (void) strlcpy(tofs, zc->zc_value, sizeof (tofs)); tosnap = strchr(tofs, '@'); *tosnap++ = '\0'; if (zc->zc_nvlist_src != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &recvdprops)) != 0) return 
(error); if (zc->zc_nvlist_conf != 0 && (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &localprops)) != 0) return (error); if (zc->zc_string[0]) origin = zc->zc_string; begin_record.drr_type = DRR_BEGIN; begin_record.drr_payloadlen = 0; begin_record.drr_u.drr_begin = zc->zc_begin_record; error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvdprops, localprops, NULL, zc->zc_guid, B_FALSE, zc->zc_cookie, &begin_record, zc->zc_cleanup_fd, &zc->zc_cookie, &zc->zc_obj, &zc->zc_action_handle, &errors); nvlist_free(recvdprops); nvlist_free(localprops); /* * Now that all props, initial and delayed, are set, report the prop * errors to the caller. */ if (zc->zc_nvlist_dst_size != 0 && errors != NULL && (nvlist_smush(errors, zc->zc_nvlist_dst_size) != 0 || put_nvlist(zc, errors) != 0)) { /* * Caller made zc->zc_nvlist_dst less than the minimum expected * size or supplied an invalid address. */ error = SET_ERROR(EINVAL); } nvlist_free(errors); return (error); } /* * innvl: { * "snapname" -> full name of the snapshot to create * (optional) "props" -> received properties to set (nvlist) * (optional) "localprops" -> override and exclude properties (nvlist) * (optional) "origin" -> name of clone origin (DRR_FLAG_CLONE) * "begin_record" -> non-byteswapped dmu_replay_record_t * "input_fd" -> file descriptor to read stream from (int32) * (optional) "force" -> force flag (value ignored) * (optional) "resumable" -> resumable flag (value ignored) * (optional) "cleanup_fd" -> cleanup-on-exit file descriptor * (optional) "action_handle" -> handle for this guid/ds mapping * (optional) "hidden_args" -> { "wkeydata" -> value } * } * * outnvl: { * "read_bytes" -> number of bytes read * "error_flags" -> zprop_errflags_t * "action_handle" -> handle for this guid/ds mapping * "errors" -> error for each unapplied received property (nvlist) * } */ static const zfs_ioc_key_t zfs_keys_recv_new[] = { {"snapname", DATA_TYPE_STRING, 0}, {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL}, {"localprops", DATA_TYPE_NVLIST, ZK_OPTIONAL}, {"origin", DATA_TYPE_STRING, ZK_OPTIONAL}, {"begin_record", DATA_TYPE_BYTE_ARRAY, 0}, {"input_fd", DATA_TYPE_INT32, 0}, {"force", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"resumable", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL}, {"action_handle", DATA_TYPE_UINT64, ZK_OPTIONAL}, {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL}, }; static int zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl) { dmu_replay_record_t *begin_record; uint_t begin_record_size; nvlist_t *errors = NULL; nvlist_t *recvprops = NULL; nvlist_t *localprops = NULL; nvlist_t *hidden_args = NULL; char *snapname; char *origin = NULL; char *tosnap; char tofs[ZFS_MAX_DATASET_NAME_LEN]; boolean_t force; boolean_t resumable; uint64_t action_handle = 0; uint64_t read_bytes = 0; uint64_t errflags = 0; int input_fd = -1; int cleanup_fd = -1; int error; snapname = fnvlist_lookup_string(innvl, "snapname"); if (dataset_namecheck(snapname, NULL, NULL) != 0 || strchr(snapname, '@') == NULL || strchr(snapname, '%')) return (SET_ERROR(EINVAL)); (void) strcpy(tofs, snapname); tosnap = strchr(tofs, '@'); *tosnap++ = '\0'; error = nvlist_lookup_string(innvl, "origin", &origin); if (error && error != ENOENT) return (error); error = nvlist_lookup_byte_array(innvl, "begin_record", (uchar_t **)&begin_record, &begin_record_size); if (error != 0 || begin_record_size != sizeof (*begin_record)) return (SET_ERROR(EINVAL)); input_fd = fnvlist_lookup_int32(innvl, "input_fd"); force = 
nvlist_exists(innvl, "force"); resumable = nvlist_exists(innvl, "resumable"); error = nvlist_lookup_int32(innvl, "cleanup_fd", &cleanup_fd); if (error && error != ENOENT) return (error); error = nvlist_lookup_uint64(innvl, "action_handle", &action_handle); if (error && error != ENOENT) return (error); /* we still use "props" here for backwards compatibility */ error = nvlist_lookup_nvlist(innvl, "props", &recvprops); if (error && error != ENOENT) return (error); error = nvlist_lookup_nvlist(innvl, "localprops", &localprops); if (error && error != ENOENT) return (error); error = nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args); if (error && error != ENOENT) return (error); error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvprops, localprops, hidden_args, force, resumable, input_fd, begin_record, cleanup_fd, &read_bytes, &errflags, &action_handle, &errors); fnvlist_add_uint64(outnvl, "read_bytes", read_bytes); fnvlist_add_uint64(outnvl, "error_flags", errflags); fnvlist_add_uint64(outnvl, "action_handle", action_handle); fnvlist_add_nvlist(outnvl, "errors", errors); nvlist_free(errors); nvlist_free(recvprops); nvlist_free(localprops); return (error); } /* * inputs: * zc_name name of snapshot to send * zc_cookie file descriptor to send stream to * zc_obj fromorigin flag (mutually exclusive with zc_fromobj) * zc_sendobj objsetid of snapshot to send * zc_fromobj objsetid of incremental fromsnap (may be zero) * zc_guid if set, estimate size of stream only. zc_cookie is ignored. * output size in zc_objset_type. * zc_flags lzc_send_flags * * outputs: * zc_objset_type estimated size, if zc_guid is set * * NOTE: This is no longer the preferred interface, any new functionality * should be added to zfs_ioc_send_new() instead. */ static int zfs_ioc_send(zfs_cmd_t *zc) { int error; offset_t off; boolean_t estimate = (zc->zc_guid != 0); boolean_t embedok = (zc->zc_flags & 0x1); boolean_t large_block_ok = (zc->zc_flags & 0x2); boolean_t compressok = (zc->zc_flags & 0x4); boolean_t rawok = (zc->zc_flags & 0x8); if (zc->zc_obj != 0) { dsl_pool_t *dp; dsl_dataset_t *tosnap; error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &tosnap); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } if (dsl_dir_is_clone(tosnap->ds_dir)) zc->zc_fromobj = dsl_dir_phys(tosnap->ds_dir)->dd_origin_obj; dsl_dataset_rele(tosnap, FTAG); dsl_pool_rele(dp, FTAG); } if (estimate) { dsl_pool_t *dp; dsl_dataset_t *tosnap; dsl_dataset_t *fromsnap = NULL; error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &tosnap); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } if (zc->zc_fromobj != 0) { error = dsl_dataset_hold_obj(dp, zc->zc_fromobj, FTAG, &fromsnap); if (error != 0) { dsl_dataset_rele(tosnap, FTAG); dsl_pool_rele(dp, FTAG); return (error); } } error = dmu_send_estimate(tosnap, fromsnap, compressok || rawok, &zc->zc_objset_type); if (fromsnap != NULL) dsl_dataset_rele(fromsnap, FTAG); dsl_dataset_rele(tosnap, FTAG); dsl_pool_rele(dp, FTAG); } else { file_t *fp = getf(zc->zc_cookie); if (fp == NULL) return (SET_ERROR(EBADF)); off = fp->f_offset; error = dmu_send_obj(zc->zc_name, zc->zc_sendobj, zc->zc_fromobj, embedok, large_block_ok, compressok, rawok, zc->zc_cookie, fp->f_vnode, &off); if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0) fp->f_offset = off; releasef(zc->zc_cookie); } return (error); } /* * inputs: * zc_name name of 
snapshot on which to report progress * zc_cookie file descriptor of send stream * * outputs: * zc_cookie number of bytes written in send stream thus far */ static int zfs_ioc_send_progress(zfs_cmd_t *zc) { dsl_pool_t *dp; dsl_dataset_t *ds; dmu_sendarg_t *dsp = NULL; int error; error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } mutex_enter(&ds->ds_sendstream_lock); /* * Iterate over all the send streams currently active on this dataset. * If there's one which matches the specified file descriptor _and_ the * stream was started by the current process, return the progress of * that stream. */ for (dsp = list_head(&ds->ds_sendstreams); dsp != NULL; dsp = list_next(&ds->ds_sendstreams, dsp)) { if (dsp->dsa_outfd == zc->zc_cookie && dsp->dsa_proc->group_leader == curproc->group_leader) break; } if (dsp != NULL) zc->zc_cookie = *(dsp->dsa_off); else error = SET_ERROR(ENOENT); mutex_exit(&ds->ds_sendstream_lock); dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); } static int zfs_ioc_inject_fault(zfs_cmd_t *zc) { int id, error; error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id, &zc->zc_inject_record); if (error == 0) zc->zc_guid = (uint64_t)id; return (error); } static int zfs_ioc_clear_fault(zfs_cmd_t *zc) { return (zio_clear_fault((int)zc->zc_guid)); } static int zfs_ioc_inject_list_next(zfs_cmd_t *zc) { int id = (int)zc->zc_guid; int error; error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name), &zc->zc_inject_record); zc->zc_guid = id; return (error); } static int zfs_ioc_error_log(zfs_cmd_t *zc) { spa_t *spa; int error; size_t count = (size_t)zc->zc_nvlist_dst_size; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst, &count); if (error == 0) zc->zc_nvlist_dst_size = count; else zc->zc_nvlist_dst_size = spa_get_errlog_size(spa); spa_close(spa, FTAG); return (error); } static int zfs_ioc_clear(zfs_cmd_t *zc) { spa_t *spa; vdev_t *vd; int error; /* * On zpool clear we also fix up missing slogs */ mutex_enter(&spa_namespace_lock); spa = spa_lookup(zc->zc_name); if (spa == NULL) { mutex_exit(&spa_namespace_lock); return (SET_ERROR(EIO)); } if (spa_get_log_state(spa) == SPA_LOG_MISSING) { /* we need to let spa_open/spa_load clear the chains */ spa_set_log_state(spa, SPA_LOG_CLEAR); } spa->spa_last_open_failed = 0; mutex_exit(&spa_namespace_lock); if (zc->zc_cookie & ZPOOL_NO_REWIND) { error = spa_open(zc->zc_name, &spa, FTAG); } else { nvlist_t *policy; nvlist_t *config = NULL; if (zc->zc_nvlist_src == 0) return (SET_ERROR(EINVAL)); if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &policy)) == 0) { error = spa_open_rewind(zc->zc_name, &spa, FTAG, policy, &config); if (config != NULL) { int err; if ((err = put_nvlist(zc, config)) != 0) error = err; nvlist_free(config); } nvlist_free(policy); } } if (error != 0) return (error); spa_vdev_state_enter(spa, SCL_NONE); if (zc->zc_guid == 0) { vd = NULL; } else { vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE); if (vd == NULL) { (void) spa_vdev_state_exit(spa, NULL, ENODEV); spa_close(spa, FTAG); return (SET_ERROR(ENODEV)); } } vdev_clear(spa, vd); (void) spa_vdev_state_exit(spa, spa_suspended(spa) ? NULL : spa->spa_root_vdev, 0); /* * Resume any suspended I/Os. 
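 *
 * (Editorial sketch, not part of the original comment; names taken from
 * the surrounding code:) the 'zpool clear' path handled by this function
 * is roughly:
 *
 *	ZFS_IOC_CLEAR
 *	    spa_set_log_state(SPA_LOG_CLEAR)	if slog devices went missing
 *	    spa_open() or spa_open_rewind()	per the rewind policy nvlist
 *	    vdev_clear()			reset vdev error counters
 *	    zio_resume()			retry any suspended I/Os
 *
 * If zio_resume() fails, the EIO below is what userland ultimately sees.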
*/ if (zio_resume(spa) != 0) error = SET_ERROR(EIO); spa_close(spa, FTAG); return (error); } /* * Reopen all the vdevs associated with the pool. * * innvl: { * "scrub_restart" -> when true and scrub is running, allow the scrub to * restart as a side effect of the reopen (boolean). * } * * outnvl is unused */ static const zfs_ioc_key_t zfs_keys_pool_reopen[] = { {"scrub_restart", DATA_TYPE_BOOLEAN_VALUE, 0}, }; /* ARGSUSED */ static int zfs_ioc_pool_reopen(const char *pool, nvlist_t *innvl, nvlist_t *outnvl) { spa_t *spa; int error; boolean_t scrub_restart = B_TRUE; if (innvl) { scrub_restart = fnvlist_lookup_boolean_value(innvl, "scrub_restart"); } error = spa_open(pool, &spa, FTAG); if (error != 0) return (error); spa_vdev_state_enter(spa, SCL_NONE); /* * If the scrub_restart flag is B_FALSE and a scrub is already * in progress then set spa_scrub_reopen flag to B_TRUE so that * we don't restart the scrub as a side effect of the reopen. * Otherwise, let vdev_open() decide if a resilver is required. */ spa->spa_scrub_reopen = (!scrub_restart && dsl_scan_scrubbing(spa->spa_dsl_pool)); vdev_reopen(spa->spa_root_vdev); spa->spa_scrub_reopen = B_FALSE; (void) spa_vdev_state_exit(spa, NULL, 0); spa_close(spa, FTAG); return (0); } /* * inputs: * zc_name name of filesystem * * outputs: * zc_string name of conflicting snapshot, if there is one */ static int zfs_ioc_promote(zfs_cmd_t *zc) { dsl_pool_t *dp; dsl_dataset_t *ds, *ods; char origin[ZFS_MAX_DATASET_NAME_LEN]; char *cp; int error; zc->zc_name[sizeof (zc->zc_name) - 1] = '\0'; if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 || strchr(zc->zc_name, '%')) return (SET_ERROR(EINVAL)); error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } if (!dsl_dir_is_clone(ds->ds_dir)) { dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (SET_ERROR(EINVAL)); } error = dsl_dataset_hold_obj(dp, dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &ods); if (error != 0) { dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); } dsl_dataset_name(ods, origin); dsl_dataset_rele(ods, FTAG); dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); /* * We don't need to unmount *all* the origin fs's snapshots, but * it's easier. */ cp = strchr(origin, '@'); if (cp) *cp = '\0'; (void) dmu_objset_find(origin, zfs_unmount_snap_cb, NULL, DS_FIND_SNAPSHOTS); return (dsl_dataset_promote(zc->zc_name, zc->zc_string)); } /* * Retrieve a single {user|group|project}{used|quota}@... property. * * inputs: * zc_name name of filesystem * zc_objset_type zfs_userquota_prop_t * zc_value domain name (e.g.
"S-1-234-567-89") * zc_guid RID/UID/GID * * outputs: * zc_cookie property value */ static int zfs_ioc_userspace_one(zfs_cmd_t *zc) { zfsvfs_t *zfsvfs; int error; if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (SET_ERROR(EINVAL)); error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error != 0) return (error); error = zfs_userspace_one(zfsvfs, zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie); zfsvfs_rele(zfsvfs, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_objset_type zfs_userquota_prop_t * zc_nvlist_dst[_size] buffer to fill (not really an nvlist) * * outputs: * zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t) * zc_cookie zap cursor */ static int zfs_ioc_userspace_many(zfs_cmd_t *zc) { zfsvfs_t *zfsvfs; int bufsize = zc->zc_nvlist_dst_size; if (bufsize <= 0) return (SET_ERROR(ENOMEM)); int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error != 0) return (error); void *buf = vmem_alloc(bufsize, KM_SLEEP); error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie, buf, &zc->zc_nvlist_dst_size); if (error == 0) { error = xcopyout(buf, (void *)(uintptr_t)zc->zc_nvlist_dst, zc->zc_nvlist_dst_size); } vmem_free(buf, bufsize); zfsvfs_rele(zfsvfs, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * * outputs: * none */ static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc) { objset_t *os; int error = 0; zfsvfs_t *zfsvfs; if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) { if (!dmu_objset_userused_enabled(zfsvfs->z_os)) { /* * If userused is not enabled, it may be because the * objset needs to be closed & reopened (to grow the * objset_phys_t). Suspend/resume the fs will do that. */ dsl_dataset_t *ds, *newds; ds = dmu_objset_ds(zfsvfs->z_os); error = zfs_suspend_fs(zfsvfs); if (error == 0) { dmu_objset_refresh_ownership(ds, &newds, B_TRUE, zfsvfs); error = zfs_resume_fs(zfsvfs, newds); } } if (error == 0) error = dmu_objset_userspace_upgrade(zfsvfs->z_os); deactivate_super(zfsvfs->z_sb); } else { /* XXX kind of reading contents without owning */ error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os); if (error != 0) return (error); error = dmu_objset_userspace_upgrade(os); dmu_objset_rele_flags(os, B_TRUE, FTAG); } return (error); } /* * inputs: * zc_name name of filesystem * * outputs: * none */ static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc) { objset_t *os; int error; error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os); if (error != 0) return (error); if (dmu_objset_userobjspace_upgradable(os) || dmu_objset_projectquota_upgradable(os)) { mutex_enter(&os->os_upgrade_lock); if (os->os_upgrade_id == 0) { /* clear potential error code and retry */ os->os_upgrade_status = 0; mutex_exit(&os->os_upgrade_lock); dmu_objset_id_quota_upgrade(os); } else { mutex_exit(&os->os_upgrade_lock); } dsl_pool_rele(dmu_objset_pool(os), FTAG); taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id); error = os->os_upgrade_status; } else { dsl_pool_rele(dmu_objset_pool(os), FTAG); } dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT, FTAG); return (error); } static int zfs_ioc_share(zfs_cmd_t *zc) { return (SET_ERROR(ENOSYS)); } ace_t full_access[] = { {(uid_t)-1, ACE_ALL_PERMS, ACE_EVERYONE, 0} }; /* * inputs: * zc_name name of containing filesystem * zc_obj object # beyond which we want next in-use object # * * outputs: * zc_obj next in-use object # */ static int zfs_ioc_next_obj(zfs_cmd_t *zc) { objset_t *os = NULL; int error; error = 
dmu_objset_hold(zc->zc_name, FTAG, &os); if (error != 0) return (error); error = dmu_object_next(os, &zc->zc_obj, B_FALSE, 0); dmu_objset_rele(os, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_value prefix name for snapshot * zc_cleanup_fd cleanup-on-exit file descriptor for calling process * * outputs: * zc_value short name of new snapshot */ static int zfs_ioc_tmp_snapshot(zfs_cmd_t *zc) { char *snap_name; char *hold_name; int error; minor_t minor; error = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor); if (error != 0) return (error); snap_name = kmem_asprintf("%s-%016llx", zc->zc_value, (u_longlong_t)ddi_get_lbolt64()); hold_name = kmem_asprintf("%%%s", zc->zc_value); error = dsl_dataset_snapshot_tmp(zc->zc_name, snap_name, minor, hold_name); if (error == 0) (void) strlcpy(zc->zc_value, snap_name, sizeof (zc->zc_value)); strfree(snap_name); strfree(hold_name); zfs_onexit_fd_rele(zc->zc_cleanup_fd); return (error); } /* * inputs: * zc_name name of "to" snapshot * zc_value name of "from" snapshot * zc_cookie file descriptor to write diff data on * * outputs: * dmu_diff_record_t's to the file descriptor */ static int zfs_ioc_diff(zfs_cmd_t *zc) { file_t *fp; offset_t off; int error; fp = getf(zc->zc_cookie); if (fp == NULL) return (SET_ERROR(EBADF)); off = fp->f_offset; error = dmu_diff(zc->zc_name, zc->zc_value, fp->f_vnode, &off); if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0) fp->f_offset = off; releasef(zc->zc_cookie); return (error); } static int zfs_ioc_smb_acl(zfs_cmd_t *zc) { return (SET_ERROR(ENOTSUP)); } /* * innvl: { * "holds" -> { snapname -> holdname (string), ... } * (optional) "cleanup_fd" -> fd (int32) * } * * outnvl: { * snapname -> error value (int32) * ... * } */ static const zfs_ioc_key_t zfs_keys_hold[] = { {"holds", DATA_TYPE_NVLIST, 0}, {"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_hold(const char *pool, nvlist_t *args, nvlist_t *errlist) { nvpair_t *pair; nvlist_t *holds; int cleanup_fd = -1; int error; minor_t minor = 0; holds = fnvlist_lookup_nvlist(args, "holds"); /* make sure the user didn't pass us any invalid (empty) tags */ for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL; pair = nvlist_next_nvpair(holds, pair)) { char *htag; error = nvpair_value_string(pair, &htag); if (error != 0) return (SET_ERROR(error)); if (strlen(htag) == 0) return (SET_ERROR(EINVAL)); } if (nvlist_lookup_int32(args, "cleanup_fd", &cleanup_fd) == 0) { error = zfs_onexit_fd_hold(cleanup_fd, &minor); if (error != 0) return (error); } error = dsl_dataset_user_hold(holds, minor, errlist); if (minor != 0) zfs_onexit_fd_rele(cleanup_fd); return (error); } /* * innvl is not used. * * outnvl: { * holdname -> time added (uint64 seconds since epoch) * ... * } */ static const zfs_ioc_key_t zfs_keys_get_holds[] = { /* no nvl keys */ }; /* ARGSUSED */ static int zfs_ioc_get_holds(const char *snapname, nvlist_t *args, nvlist_t *outnvl) { return (dsl_dataset_get_holds(snapname, outnvl)); } /* * innvl: { * snapname -> { holdname, ... } * ... * } * * outnvl: { * snapname -> error value (int32) * ... 
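 * e.g. (illustrative, not in the original) releasing hold "keep" from
 * "tank/fs@snap" would yield "tank/fs@snap" -> 0 on success, or a
 * positive errno if that snapshot or hold could not be found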
* } */ static const zfs_ioc_key_t zfs_keys_release[] = { {"...", DATA_TYPE_NVLIST, ZK_WILDCARDLIST}, }; /* ARGSUSED */ static int zfs_ioc_release(const char *pool, nvlist_t *holds, nvlist_t *errlist) { return (dsl_dataset_user_release(holds, errlist)); } /* * inputs: * zc_guid flags (ZEVENT_NONBLOCK) * zc_cleanup_fd zevent file descriptor * * outputs: * zc_nvlist_dst next nvlist event * zc_cookie dropped events since last get */ static int zfs_ioc_events_next(zfs_cmd_t *zc) { zfs_zevent_t *ze; nvlist_t *event = NULL; minor_t minor; uint64_t dropped = 0; int error; error = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze); if (error != 0) return (error); do { error = zfs_zevent_next(ze, &event, &zc->zc_nvlist_dst_size, &dropped); if (event != NULL) { zc->zc_cookie = dropped; error = put_nvlist(zc, event); nvlist_free(event); } if (zc->zc_guid & ZEVENT_NONBLOCK) break; if ((error == 0) || (error != ENOENT)) break; error = zfs_zevent_wait(ze); if (error != 0) break; } while (1); zfs_zevent_fd_rele(zc->zc_cleanup_fd); return (error); } /* * outputs: * zc_cookie cleared events count */ static int zfs_ioc_events_clear(zfs_cmd_t *zc) { int count; zfs_zevent_drain_all(&count); zc->zc_cookie = count; return (0); } /* * inputs: * zc_guid eid | ZEVENT_SEEK_START | ZEVENT_SEEK_END * zc_cleanup_fd zevent file descriptor */ static int zfs_ioc_events_seek(zfs_cmd_t *zc) { zfs_zevent_t *ze; minor_t minor; int error; error = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze); if (error != 0) return (error); error = zfs_zevent_seek(ze, zc->zc_guid); zfs_zevent_fd_rele(zc->zc_cleanup_fd); return (error); } /* * inputs: * zc_name name of new filesystem or snapshot * zc_value full name of old snapshot * * outputs: * zc_cookie space in bytes * zc_objset_type compressed space in bytes * zc_perm_action uncompressed space in bytes */ static int zfs_ioc_space_written(zfs_cmd_t *zc) { int error; dsl_pool_t *dp; dsl_dataset_t *new, *old; error = dsl_pool_hold(zc->zc_name, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &new); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } error = dsl_dataset_hold(dp, zc->zc_value, FTAG, &old); if (error != 0) { dsl_dataset_rele(new, FTAG); dsl_pool_rele(dp, FTAG); return (error); } error = dsl_dataset_space_written(old, new, &zc->zc_cookie, &zc->zc_objset_type, &zc->zc_perm_action); dsl_dataset_rele(old, FTAG); dsl_dataset_rele(new, FTAG); dsl_pool_rele(dp, FTAG); return (error); } /* * innvl: { * "firstsnap" -> snapshot name * } * * outnvl: { * "used" -> space in bytes * "compressed" -> compressed space in bytes * "uncompressed" -> uncompressed space in bytes * } */ static const zfs_ioc_key_t zfs_keys_space_snaps[] = { {"firstsnap", DATA_TYPE_STRING, 0}, }; static int zfs_ioc_space_snaps(const char *lastsnap, nvlist_t *innvl, nvlist_t *outnvl) { int error; dsl_pool_t *dp; dsl_dataset_t *new, *old; char *firstsnap; uint64_t used, comp, uncomp; firstsnap = fnvlist_lookup_string(innvl, "firstsnap"); error = dsl_pool_hold(lastsnap, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, lastsnap, FTAG, &new); if (error == 0 && !new->ds_is_snapshot) { dsl_dataset_rele(new, FTAG); error = SET_ERROR(EINVAL); } if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } error = dsl_dataset_hold(dp, firstsnap, FTAG, &old); if (error == 0 && !old->ds_is_snapshot) { dsl_dataset_rele(old, FTAG); error = SET_ERROR(EINVAL); } if (error != 0) { dsl_dataset_rele(new, FTAG); dsl_pool_rele(dp, FTAG); return (error); } error =
dsl_dataset_space_wouldfree(old, new, &used, &comp, &uncomp); dsl_dataset_rele(old, FTAG); dsl_dataset_rele(new, FTAG); dsl_pool_rele(dp, FTAG); fnvlist_add_uint64(outnvl, "used", used); fnvlist_add_uint64(outnvl, "compressed", comp); fnvlist_add_uint64(outnvl, "uncompressed", uncomp); return (error); } /* * innvl: { * "fd" -> file descriptor to write stream to (int32) * (optional) "fromsnap" -> full snap name to send an incremental from * (optional) "largeblockok" -> (value ignored) * indicates that blocks > 128KB are permitted * (optional) "embedok" -> (value ignored) * presence indicates DRR_WRITE_EMBEDDED records are permitted * (optional) "compressok" -> (value ignored) * presence indicates compressed DRR_WRITE records are permitted * (optional) "rawok" -> (value ignored) * presence indicates raw encrypted records should be used. * (optional) "resume_object" and "resume_offset" -> (uint64) * if present, resume send stream from specified object and offset. * } * * outnvl is unused */ static const zfs_ioc_key_t zfs_keys_send_new[] = { {"fd", DATA_TYPE_INT32, 0}, {"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL}, {"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"resume_object", DATA_TYPE_UINT64, ZK_OPTIONAL}, {"resume_offset", DATA_TYPE_UINT64, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl) { int error; offset_t off; char *fromname = NULL; int fd; file_t *fp; boolean_t largeblockok; boolean_t embedok; boolean_t compressok; boolean_t rawok; uint64_t resumeobj = 0; uint64_t resumeoff = 0; fd = fnvlist_lookup_int32(innvl, "fd"); (void) nvlist_lookup_string(innvl, "fromsnap", &fromname); largeblockok = nvlist_exists(innvl, "largeblockok"); embedok = nvlist_exists(innvl, "embedok"); compressok = nvlist_exists(innvl, "compressok"); rawok = nvlist_exists(innvl, "rawok"); (void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj); (void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff); if ((fp = getf(fd)) == NULL) return (SET_ERROR(EBADF)); off = fp->f_offset; error = dmu_send(snapname, fromname, embedok, largeblockok, compressok, rawok, fd, resumeobj, resumeoff, fp->f_vnode, &off); if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0) fp->f_offset = off; releasef(fd); return (error); } /* * Determine approximately how large a zfs send stream will be -- the number * of bytes that will be written to the fd supplied to zfs_ioc_send_new(). * * innvl: { * (optional) "from" -> full snap or bookmark name to send an incremental * from * (optional) "largeblockok" -> (value ignored) * indicates that blocks > 128KB are permitted * (optional) "embedok" -> (value ignored) * presence indicates DRR_WRITE_EMBEDDED records are permitted * (optional) "compressok" -> (value ignored) * presence indicates compressed DRR_WRITE records are permitted * (optional) "rawok" -> (value ignored) * presence indicates raw encrypted records should be used. 
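 *
 * e.g. (illustrative, not part of the original notes) passing
 * "from" -> "tank/fs#daily" together with "compressok" requests a
 * compressed incremental estimate relative to that bookmark's
 * creation TXG, per the '#' branch in the function below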
* } * * outnvl: { * "space" -> bytes of space (uint64) * } */ static const zfs_ioc_key_t zfs_keys_send_space[] = { {"from", DATA_TYPE_STRING, ZK_OPTIONAL}, {"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL}, {"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, {"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, }; static int zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl) { dsl_pool_t *dp; dsl_dataset_t *tosnap; int error; char *fromname; boolean_t compressok; boolean_t rawok; uint64_t space; error = dsl_pool_hold(snapname, FTAG, &dp); if (error != 0) return (error); error = dsl_dataset_hold(dp, snapname, FTAG, &tosnap); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } compressok = nvlist_exists(innvl, "compressok"); rawok = nvlist_exists(innvl, "rawok"); error = nvlist_lookup_string(innvl, "from", &fromname); if (error == 0) { if (strchr(fromname, '@') != NULL) { /* * If from is a snapshot, hold it and use the more * efficient dmu_send_estimate to estimate send space * size using deadlists. */ dsl_dataset_t *fromsnap; error = dsl_dataset_hold(dp, fromname, FTAG, &fromsnap); if (error != 0) goto out; error = dmu_send_estimate(tosnap, fromsnap, compressok || rawok, &space); dsl_dataset_rele(fromsnap, FTAG); } else if (strchr(fromname, '#') != NULL) { /* * If from is a bookmark, fetch the creation TXG of the * snapshot it was created from and use that to find * blocks that were born after it. */ zfs_bookmark_phys_t frombm; error = dsl_bookmark_lookup(dp, fromname, tosnap, &frombm); if (error != 0) goto out; error = dmu_send_estimate_from_txg(tosnap, frombm.zbm_creation_txg, compressok || rawok, &space); } else { /* * from is not properly formatted as a snapshot or * bookmark */ error = SET_ERROR(EINVAL); goto out; } } else { /* * If estimating the size of a full send, use dmu_send_estimate. */ error = dmu_send_estimate(tosnap, NULL, compressok || rawok, &space); } fnvlist_add_uint64(outnvl, "space", space); out: dsl_dataset_rele(tosnap, FTAG); dsl_pool_rele(dp, FTAG); return (error); } /* * Sync the currently open TXG to disk for the specified pool. * This is somewhat similar to 'zfs_sync()'. * For cases that do not result in error this ioctl will wait for * the currently open TXG to commit before returning back to the caller. * * innvl: { * "force" -> when true, force uberblock update even if there is no dirty data. * In addition this will cause the vdev configuration to be written * out including updating the zpool cache file. (boolean_t) * } * * onvl is unused */ static const zfs_ioc_key_t zfs_keys_pool_sync[] = { {"force", DATA_TYPE_BOOLEAN_VALUE, 0}, }; /* ARGSUSED */ static int zfs_ioc_pool_sync(const char *pool, nvlist_t *innvl, nvlist_t *onvl) { int err; boolean_t force = B_FALSE; spa_t *spa; if ((err = spa_open(pool, &spa, FTAG)) != 0) return (err); if (innvl) force = fnvlist_lookup_boolean_value(innvl, "force"); if (force) { spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER); vdev_config_dirty(spa->spa_root_vdev); spa_config_exit(spa, SCL_CONFIG, FTAG); } txg_wait_synced(spa_get_dsl(spa), 0); spa_close(spa, FTAG); return (err); } /* * Load a user's wrapping key into the kernel. 
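 *
 * (Editorial note, not in the original:) this is the handler behind
 * 'zfs load-key'; when "noop" is present the key is only checked
 * against the dataset without being retained in the keystore, which
 * is how 'zfs load-key -n' is implemented.
 *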
* innvl: { * "hidden_args" -> { "wkeydata" -> value } * raw uint8_t array of encryption wrapping key data (32 bytes) * (optional) "noop" -> (value ignored) * presence indicates the key should only be verified, not loaded * } */ static const zfs_ioc_key_t zfs_keys_load_key[] = { {"hidden_args", DATA_TYPE_NVLIST, 0}, {"noop", DATA_TYPE_BOOLEAN, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_load_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl) { int ret; dsl_crypto_params_t *dcp = NULL; nvlist_t *hidden_args; boolean_t noop = nvlist_exists(innvl, "noop"); if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) { ret = SET_ERROR(EINVAL); goto error; } hidden_args = fnvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS); ret = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL, hidden_args, &dcp); if (ret != 0) goto error; ret = spa_keystore_load_wkey(dsname, dcp, noop); if (ret != 0) goto error; dsl_crypto_params_free(dcp, noop); return (0); error: dsl_crypto_params_free(dcp, B_TRUE); return (ret); } /* * Unload a user's wrapping key from the kernel. * Both innvl and outnvl are unused. */ static const zfs_ioc_key_t zfs_keys_unload_key[] = { /* no nvl keys */ }; /* ARGSUSED */ static int zfs_ioc_unload_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl) { int ret = 0; if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) { ret = SET_ERROR(EINVAL); goto out; } ret = spa_keystore_unload_wkey(dsname); if (ret != 0) goto out; out: return (ret); } /* * Changes a user's wrapping key used to decrypt a dataset. The keyformat, * keylocation, pbkdf2salt, and pbkdf2iters properties can also be specified * here to change how the key is derived in userspace. * * innvl: { * "hidden_args" (optional) -> { "wkeydata" -> value } * raw uint8_t array of new encryption wrapping key data (32 bytes) * "props" (optional) -> { prop -> value } * } * * outnvl is unused */ static const zfs_ioc_key_t zfs_keys_change_key[] = { {"crypt_cmd", DATA_TYPE_UINT64, ZK_OPTIONAL}, {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL}, {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL}, }; /* ARGSUSED */ static int zfs_ioc_change_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl) { int ret; uint64_t cmd = DCP_CMD_NONE; dsl_crypto_params_t *dcp = NULL; nvlist_t *args = NULL, *hidden_args = NULL; if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) { ret = SET_ERROR(EINVAL); goto error; } (void) nvlist_lookup_uint64(innvl, "crypt_cmd", &cmd); (void) nvlist_lookup_nvlist(innvl, "props", &args); (void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args); ret = dsl_crypto_params_create_nvlist(cmd, args, hidden_args, &dcp); if (ret != 0) goto error; ret = spa_keystore_change_key(dsname, dcp); if (ret != 0) goto error; dsl_crypto_params_free(dcp, B_FALSE); return (0); error: dsl_crypto_params_free(dcp, B_TRUE); return (ret); } static zfs_ioc_vec_t zfs_ioc_vec[ZFS_IOC_LAST - ZFS_IOC_FIRST]; static void zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck, boolean_t log_history, zfs_ioc_poolcheck_t pool_check) { zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST]; ASSERT3U(ioc, >=, ZFS_IOC_FIRST); ASSERT3U(ioc, <, ZFS_IOC_LAST); ASSERT3P(vec->zvec_legacy_func, ==, NULL); ASSERT3P(vec->zvec_func, ==, NULL); vec->zvec_legacy_func = func; vec->zvec_secpolicy = secpolicy; vec->zvec_namecheck = namecheck; vec->zvec_allow_log = log_history; vec->zvec_pool_check = pool_check; } /* * See the block comment at the beginning of
this file for details on * each argument to this function. */ static void zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func, zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck, zfs_ioc_poolcheck_t pool_check, boolean_t smush_outnvlist, boolean_t allow_log, const zfs_ioc_key_t *nvl_keys, size_t num_keys) { zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST]; ASSERT3U(ioc, >=, ZFS_IOC_FIRST); ASSERT3U(ioc, <, ZFS_IOC_LAST); ASSERT3P(vec->zvec_legacy_func, ==, NULL); ASSERT3P(vec->zvec_func, ==, NULL); /* if we are logging, the name must be valid */ ASSERT(!allow_log || namecheck != NO_NAME); vec->zvec_name = name; vec->zvec_func = func; vec->zvec_secpolicy = secpolicy; vec->zvec_namecheck = namecheck; vec->zvec_pool_check = pool_check; vec->zvec_smush_outnvlist = smush_outnvlist; vec->zvec_allow_log = allow_log; vec->zvec_nvl_keys = nvl_keys; vec->zvec_nvl_key_count = num_keys; } static void zfs_ioctl_register_pool(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy, boolean_t log_history, zfs_ioc_poolcheck_t pool_check) { zfs_ioctl_register_legacy(ioc, func, secpolicy, POOL_NAME, log_history, pool_check); } static void zfs_ioctl_register_dataset_nolog(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy, zfs_ioc_poolcheck_t pool_check) { zfs_ioctl_register_legacy(ioc, func, secpolicy, DATASET_NAME, B_FALSE, pool_check); } static void zfs_ioctl_register_pool_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func) { zfs_ioctl_register_legacy(ioc, func, zfs_secpolicy_config, POOL_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY); } static void zfs_ioctl_register_pool_meta(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy) { zfs_ioctl_register_legacy(ioc, func, secpolicy, NO_NAME, B_FALSE, POOL_CHECK_NONE); } static void zfs_ioctl_register_dataset_read_secpolicy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy) { zfs_ioctl_register_legacy(ioc, func, secpolicy, DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED); } static void zfs_ioctl_register_dataset_read(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func) { zfs_ioctl_register_dataset_read_secpolicy(ioc, func, zfs_secpolicy_read); } static void zfs_ioctl_register_dataset_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy) { zfs_ioctl_register_legacy(ioc, func, secpolicy, DATASET_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY); } static void zfs_ioctl_init(void) { zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT, zfs_ioc_snapshot, zfs_secpolicy_snapshot, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_snapshot, ARRAY_SIZE(zfs_keys_snapshot)); zfs_ioctl_register("log_history", ZFS_IOC_LOG_HISTORY, zfs_ioc_log_history, zfs_secpolicy_log_history, NO_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE, zfs_keys_log_history, ARRAY_SIZE(zfs_keys_log_history)); zfs_ioctl_register("space_snaps", ZFS_IOC_SPACE_SNAPS, zfs_ioc_space_snaps, zfs_secpolicy_read, DATASET_NAME, POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_space_snaps, ARRAY_SIZE(zfs_keys_space_snaps)); zfs_ioctl_register("send", ZFS_IOC_SEND_NEW, zfs_ioc_send_new, zfs_secpolicy_send_new, DATASET_NAME, POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_send_new, ARRAY_SIZE(zfs_keys_send_new)); zfs_ioctl_register("send_space", ZFS_IOC_SEND_SPACE, zfs_ioc_send_space, zfs_secpolicy_read, DATASET_NAME, POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_send_space, 
ARRAY_SIZE(zfs_keys_send_space)); zfs_ioctl_register("create", ZFS_IOC_CREATE, zfs_ioc_create, zfs_secpolicy_create_clone, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_create, ARRAY_SIZE(zfs_keys_create)); zfs_ioctl_register("clone", ZFS_IOC_CLONE, zfs_ioc_clone, zfs_secpolicy_create_clone, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_clone, ARRAY_SIZE(zfs_keys_clone)); zfs_ioctl_register("remap", ZFS_IOC_REMAP, zfs_ioc_remap, zfs_secpolicy_remap, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE, zfs_keys_remap, ARRAY_SIZE(zfs_keys_remap)); zfs_ioctl_register("destroy_snaps", ZFS_IOC_DESTROY_SNAPS, zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_destroy_snaps, ARRAY_SIZE(zfs_keys_destroy_snaps)); zfs_ioctl_register("hold", ZFS_IOC_HOLD, zfs_ioc_hold, zfs_secpolicy_hold, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_hold, ARRAY_SIZE(zfs_keys_hold)); zfs_ioctl_register("release", ZFS_IOC_RELEASE, zfs_ioc_release, zfs_secpolicy_release, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_release, ARRAY_SIZE(zfs_keys_release)); zfs_ioctl_register("get_holds", ZFS_IOC_GET_HOLDS, zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME, POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_get_holds, ARRAY_SIZE(zfs_keys_get_holds)); zfs_ioctl_register("rollback", ZFS_IOC_ROLLBACK, zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE, zfs_keys_rollback, ARRAY_SIZE(zfs_keys_rollback)); zfs_ioctl_register("bookmark", ZFS_IOC_BOOKMARK, zfs_ioc_bookmark, zfs_secpolicy_bookmark, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_bookmark, ARRAY_SIZE(zfs_keys_bookmark)); zfs_ioctl_register("get_bookmarks", ZFS_IOC_GET_BOOKMARKS, zfs_ioc_get_bookmarks, zfs_secpolicy_read, DATASET_NAME, POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_get_bookmarks, ARRAY_SIZE(zfs_keys_get_bookmarks)); zfs_ioctl_register("destroy_bookmarks", ZFS_IOC_DESTROY_BOOKMARKS, zfs_ioc_destroy_bookmarks, zfs_secpolicy_destroy_bookmarks, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_destroy_bookmarks, ARRAY_SIZE(zfs_keys_destroy_bookmarks)); zfs_ioctl_register("receive", ZFS_IOC_RECV_NEW, zfs_ioc_recv_new, zfs_secpolicy_recv_new, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_recv_new, ARRAY_SIZE(zfs_keys_recv_new)); zfs_ioctl_register("load-key", ZFS_IOC_LOAD_KEY, zfs_ioc_load_key, zfs_secpolicy_load_key, DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE, zfs_keys_load_key, ARRAY_SIZE(zfs_keys_load_key)); zfs_ioctl_register("unload-key", ZFS_IOC_UNLOAD_KEY, zfs_ioc_unload_key, zfs_secpolicy_load_key, DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE, zfs_keys_unload_key, ARRAY_SIZE(zfs_keys_unload_key)); zfs_ioctl_register("change-key", ZFS_IOC_CHANGE_KEY, zfs_ioc_change_key, zfs_secpolicy_change_key, DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_change_key, ARRAY_SIZE(zfs_keys_change_key)); zfs_ioctl_register("sync", ZFS_IOC_POOL_SYNC, zfs_ioc_pool_sync, zfs_secpolicy_none, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE, zfs_keys_pool_sync, ARRAY_SIZE(zfs_keys_pool_sync)); zfs_ioctl_register("reopen", ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen, zfs_secpolicy_config, POOL_NAME, 
POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE, zfs_keys_pool_reopen, ARRAY_SIZE(zfs_keys_pool_reopen)); zfs_ioctl_register("channel_program", ZFS_IOC_CHANNEL_PROGRAM, zfs_ioc_channel_program, zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_channel_program, ARRAY_SIZE(zfs_keys_channel_program)); zfs_ioctl_register("zpool_checkpoint", ZFS_IOC_POOL_CHECKPOINT, zfs_ioc_pool_checkpoint, zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_pool_checkpoint, ARRAY_SIZE(zfs_keys_pool_checkpoint)); zfs_ioctl_register("zpool_discard_checkpoint", ZFS_IOC_POOL_DISCARD_CHECKPOINT, zfs_ioc_pool_discard_checkpoint, zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_pool_discard_checkpoint, ARRAY_SIZE(zfs_keys_pool_discard_checkpoint)); zfs_ioctl_register("initialize", ZFS_IOC_POOL_INITIALIZE, zfs_ioc_pool_initialize, zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE, zfs_keys_pool_initialize, ARRAY_SIZE(zfs_keys_pool_initialize)); /* IOCTLS that use the legacy function signature */ zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze, zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_READONLY); zfs_ioctl_register_pool(ZFS_IOC_POOL_CREATE, zfs_ioc_pool_create, zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE); zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SCAN, zfs_ioc_pool_scan); zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_UPGRADE, zfs_ioc_pool_upgrade); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ADD, zfs_ioc_vdev_add); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_REMOVE, zfs_ioc_vdev_remove); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SET_STATE, zfs_ioc_vdev_set_state); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ATTACH, zfs_ioc_vdev_attach); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_DETACH, zfs_ioc_vdev_detach); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETPATH, zfs_ioc_vdev_setpath); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETFRU, zfs_ioc_vdev_setfru); zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SET_PROPS, zfs_ioc_pool_set_props); zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SPLIT, zfs_ioc_vdev_split); zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_REGUID, zfs_ioc_pool_reguid); zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_CONFIGS, zfs_ioc_pool_configs, zfs_secpolicy_none); zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_TRYIMPORT, zfs_ioc_pool_tryimport, zfs_secpolicy_config); zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_FAULT, zfs_ioc_inject_fault, zfs_secpolicy_inject); zfs_ioctl_register_pool_meta(ZFS_IOC_CLEAR_FAULT, zfs_ioc_clear_fault, zfs_secpolicy_inject); zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_LIST_NEXT, zfs_ioc_inject_list_next, zfs_secpolicy_inject); /* * pool destroy and export don't log their history as part of * zfsdev_ioctl; rather, zfs_ioc_pool_export * does the logging of those commands.
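 * (Editorial note:) presumably because once a pool has been destroyed
 * or exported there is no longer a pool to write a history record to,
 * so the record must be logged from within the handler while the pool
 * is still open, rather than from the generic path below.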
*/ zfs_ioctl_register_pool(ZFS_IOC_POOL_DESTROY, zfs_ioc_pool_destroy, zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED); zfs_ioctl_register_pool(ZFS_IOC_POOL_EXPORT, zfs_ioc_pool_export, zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED); zfs_ioctl_register_pool(ZFS_IOC_POOL_STATS, zfs_ioc_pool_stats, zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE); zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_PROPS, zfs_ioc_pool_get_props, zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE); zfs_ioctl_register_pool(ZFS_IOC_ERROR_LOG, zfs_ioc_error_log, zfs_secpolicy_inject, B_FALSE, POOL_CHECK_SUSPENDED); zfs_ioctl_register_pool(ZFS_IOC_DSOBJ_TO_DSNAME, zfs_ioc_dsobj_to_dsname, zfs_secpolicy_diff, B_FALSE, POOL_CHECK_SUSPENDED); zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_HISTORY, zfs_ioc_pool_get_history, zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED); zfs_ioctl_register_pool(ZFS_IOC_POOL_IMPORT, zfs_ioc_pool_import, zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE); zfs_ioctl_register_pool(ZFS_IOC_CLEAR, zfs_ioc_clear, zfs_secpolicy_config, B_TRUE, POOL_CHECK_READONLY); zfs_ioctl_register_dataset_read(ZFS_IOC_SPACE_WRITTEN, zfs_ioc_space_written); zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_RECVD_PROPS, zfs_ioc_objset_recvd_props); zfs_ioctl_register_dataset_read(ZFS_IOC_NEXT_OBJ, zfs_ioc_next_obj); zfs_ioctl_register_dataset_read(ZFS_IOC_GET_FSACL, zfs_ioc_get_fsacl); zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_STATS, zfs_ioc_objset_stats); zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_ZPLPROPS, zfs_ioc_objset_zplprops); zfs_ioctl_register_dataset_read(ZFS_IOC_DATASET_LIST_NEXT, zfs_ioc_dataset_list_next); zfs_ioctl_register_dataset_read(ZFS_IOC_SNAPSHOT_LIST_NEXT, zfs_ioc_snapshot_list_next); zfs_ioctl_register_dataset_read(ZFS_IOC_SEND_PROGRESS, zfs_ioc_send_progress); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_DIFF, zfs_ioc_diff, zfs_secpolicy_diff); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_STATS, zfs_ioc_obj_to_stats, zfs_secpolicy_diff); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_PATH, zfs_ioc_obj_to_path, zfs_secpolicy_diff); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_ONE, zfs_ioc_userspace_one, zfs_secpolicy_userspace_one); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_MANY, zfs_ioc_userspace_many, zfs_secpolicy_userspace_many); zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_SEND, zfs_ioc_send, zfs_secpolicy_send); zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_PROP, zfs_ioc_set_prop, zfs_secpolicy_none); zfs_ioctl_register_dataset_modify(ZFS_IOC_DESTROY, zfs_ioc_destroy, zfs_secpolicy_destroy); zfs_ioctl_register_dataset_modify(ZFS_IOC_RENAME, zfs_ioc_rename, zfs_secpolicy_rename); zfs_ioctl_register_dataset_modify(ZFS_IOC_RECV, zfs_ioc_recv, zfs_secpolicy_recv); zfs_ioctl_register_dataset_modify(ZFS_IOC_PROMOTE, zfs_ioc_promote, zfs_secpolicy_promote); zfs_ioctl_register_dataset_modify(ZFS_IOC_INHERIT_PROP, zfs_ioc_inherit_prop, zfs_secpolicy_inherit_prop); zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_FSACL, zfs_ioc_set_fsacl, zfs_secpolicy_set_fsacl); zfs_ioctl_register_dataset_nolog(ZFS_IOC_SHARE, zfs_ioc_share, zfs_secpolicy_share, POOL_CHECK_NONE); zfs_ioctl_register_dataset_nolog(ZFS_IOC_SMB_ACL, zfs_ioc_smb_acl, zfs_secpolicy_smb_acl, POOL_CHECK_NONE); zfs_ioctl_register_dataset_nolog(ZFS_IOC_USERSPACE_UPGRADE, zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY); zfs_ioctl_register_dataset_nolog(ZFS_IOC_TMP_SNAPSHOT, zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot, 
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY); /* * ZoL functions */ zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_NEXT, zfs_ioc_events_next, zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE); zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_CLEAR, zfs_ioc_events_clear, zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE); zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_SEEK, zfs_ioc_events_seek, zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE); } /* * Verify that for non-legacy ioctls the input nvlist * pairs match against the expected input. * * Possible errors are: * ZFS_ERR_IOC_ARG_UNAVAIL An unrecognized nvpair was encountered * ZFS_ERR_IOC_ARG_REQUIRED A required nvpair is missing * ZFS_ERR_IOC_ARG_BADTYPE Invalid type for nvpair */ static int zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec) { const zfs_ioc_key_t *nvl_keys = vec->zvec_nvl_keys; boolean_t required_keys_found = B_FALSE; /* * examine each input pair */ for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL); pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { char *name = nvpair_name(pair); data_type_t type = nvpair_type(pair); boolean_t identified = B_FALSE; /* * check pair against the documented names and type */ for (int k = 0; k < vec->zvec_nvl_key_count; k++) { /* if not a wild card name, check for an exact match */ if ((nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) == 0 && strcmp(nvl_keys[k].zkey_name, name) != 0) continue; identified = B_TRUE; if (nvl_keys[k].zkey_type != DATA_TYPE_ANY && nvl_keys[k].zkey_type != type) { return (SET_ERROR(ZFS_ERR_IOC_ARG_BADTYPE)); } if (nvl_keys[k].zkey_flags & ZK_OPTIONAL) continue; required_keys_found = B_TRUE; break; } /* allow an 'optional' key; everything else is invalid */ if (!identified && (strcmp(name, "optional") != 0 || type != DATA_TYPE_NVLIST)) { return (SET_ERROR(ZFS_ERR_IOC_ARG_UNAVAIL)); } } /* verify that all required keys were found */ for (int k = 0; k < vec->zvec_nvl_key_count; k++) { if (nvl_keys[k].zkey_flags & ZK_OPTIONAL) continue; if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) { /* at least one non-optional key is expected here */ if (!required_keys_found) return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED)); continue; } if (!nvlist_exists(innvl, nvl_keys[k].zkey_name)) return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED)); } return (0); } int pool_status_check(const char *name, zfs_ioc_namecheck_t type, zfs_ioc_poolcheck_t check) { spa_t *spa; int error; ASSERT(type == POOL_NAME || type == DATASET_NAME); if (check & POOL_CHECK_NONE) return (0); error = spa_open(name, &spa, FTAG); if (error == 0) { if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa)) error = SET_ERROR(EAGAIN); else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa)) error = SET_ERROR(EROFS); spa_close(spa, FTAG); } return (error); } static void * zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which) { zfsdev_state_t *zs; for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) { if (zs->zs_minor == minor) { smp_rmb(); switch (which) { case ZST_ONEXIT: return (zs->zs_onexit); case ZST_ZEVENT: return (zs->zs_zevent); case ZST_ALL: return (zs); } } } return (NULL); } void * zfsdev_get_state(minor_t minor, enum zfsdev_state_type which) { void *ptr; ptr = zfsdev_get_state_impl(minor, which); return (ptr); } int zfsdev_getminor(struct file *filp, minor_t *minorp) { zfsdev_state_t *zs, *fpd; ASSERT(filp != NULL); ASSERT(!MUTEX_HELD(&zfsdev_state_lock)); fpd = filp->private_data; if (fpd == NULL) return (SET_ERROR(EBADF)); mutex_enter(&zfsdev_state_lock); for (zs =
zfsdev_state_list; zs != NULL; zs = zs->zs_next) { if (zs->zs_minor == -1) continue; if (fpd == zs) { *minorp = fpd->zs_minor; mutex_exit(&zfsdev_state_lock); return (0); } } mutex_exit(&zfsdev_state_lock); return (SET_ERROR(EBADF)); } /* * Find a free minor number. The zfsdev_state_list is expected to * be short since it is only a list of currently open file handles. */ minor_t zfsdev_minor_alloc(void) { static minor_t last_minor = 0; minor_t m; ASSERT(MUTEX_HELD(&zfsdev_state_lock)); for (m = last_minor + 1; m != last_minor; m++) { if (m > ZFSDEV_MAX_MINOR) m = 1; if (zfsdev_get_state_impl(m, ZST_ALL) == NULL) { last_minor = m; return (m); } } return (0); } static int zfsdev_state_init(struct file *filp) { zfsdev_state_t *zs, *zsprev = NULL; minor_t minor; boolean_t newzs = B_FALSE; ASSERT(MUTEX_HELD(&zfsdev_state_lock)); minor = zfsdev_minor_alloc(); if (minor == 0) return (SET_ERROR(ENXIO)); for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) { if (zs->zs_minor == -1) break; zsprev = zs; } if (!zs) { zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP); newzs = B_TRUE; } zs->zs_file = filp; filp->private_data = zs; zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit); zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent); /* * In order to provide for lock-free concurrent read access * to the minor list in zfsdev_get_state_impl(), new entries * must be completely written before linking them into the * list whereas existing entries are already linked; the last * operation must be updating zs_minor (from -1 to the new * value). */ if (newzs) { zs->zs_minor = minor; smp_wmb(); zsprev->zs_next = zs; } else { smp_wmb(); zs->zs_minor = minor; } return (0); } static int zfsdev_state_destroy(struct file *filp) { zfsdev_state_t *zs; ASSERT(MUTEX_HELD(&zfsdev_state_lock)); ASSERT(filp->private_data != NULL); zs = filp->private_data; zs->zs_minor = -1; zfs_onexit_destroy(zs->zs_onexit); zfs_zevent_destroy(zs->zs_zevent); return (0); } static int zfsdev_open(struct inode *ino, struct file *filp) { int error; mutex_enter(&zfsdev_state_lock); error = zfsdev_state_init(filp); mutex_exit(&zfsdev_state_lock); return (-error); } static int zfsdev_release(struct inode *ino, struct file *filp) { int error; mutex_enter(&zfsdev_state_lock); error = zfsdev_state_destroy(filp); mutex_exit(&zfsdev_state_lock); return (-error); } static long zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg) { zfs_cmd_t *zc; uint_t vecnum; int error, rc, flag = 0; const zfs_ioc_vec_t *vec; char *saved_poolname = NULL; nvlist_t *innvl = NULL; fstrans_cookie_t cookie; vecnum = cmd - ZFS_IOC_FIRST; if (vecnum >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0])) return (-SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL)); vec = &zfs_ioc_vec[vecnum]; /* * The registered ioctl list may be sparse; verify that either * a normal or a legacy handler is registered. */ if (vec->zvec_func == NULL && vec->zvec_legacy_func == NULL) return (-SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL)); zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP); error = ddi_copyin((void *)arg, zc, sizeof (zfs_cmd_t), flag); if (error != 0) { error = SET_ERROR(EFAULT); goto out; } zc->zc_iflags = flag & FKIOCTL; if (zc->zc_nvlist_src_size > MAX_NVLIST_SRC_SIZE) { /* * Make sure the user doesn't pass in an insane value for * zc_nvlist_src_size. We have to check, since we will end * up allocating that much memory inside of get_nvlist(). This * prevents a nefarious user from allocating tons of kernel * memory. * * Also, we return EINVAL instead of ENOMEM here.
The reason * being that returning ENOMEM from an ioctl() has a special * connotation; that the user's size value is too small and * needs to be expanded to hold the nvlist. See * zcmd_expand_dst_nvlist() for details. */ error = SET_ERROR(EINVAL); /* User's size too big */ } else if (zc->zc_nvlist_src_size != 0) { error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &innvl); if (error != 0) goto out; } /* * Ensure that all pool/dataset names are valid before we pass down to * the lower layers. */ zc->zc_name[sizeof (zc->zc_name) - 1] = '\0'; switch (vec->zvec_namecheck) { case POOL_NAME: if (pool_namecheck(zc->zc_name, NULL, NULL) != 0) error = SET_ERROR(EINVAL); else error = pool_status_check(zc->zc_name, vec->zvec_namecheck, vec->zvec_pool_check); break; case DATASET_NAME: if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0) error = SET_ERROR(EINVAL); else error = pool_status_check(zc->zc_name, vec->zvec_namecheck, vec->zvec_pool_check); break; case NO_NAME: break; } /* * Ensure that all input pairs are valid before we pass them down * to the lower layers. * * The vectored functions can use fnvlist_lookup_{type} for any * required pairs since zfs_check_input_nvpairs() confirmed that * they exist and are of the correct type. */ if (error == 0 && vec->zvec_func != NULL) { error = zfs_check_input_nvpairs(innvl, vec); if (error != 0) goto out; } if (error == 0) { cookie = spl_fstrans_mark(); error = vec->zvec_secpolicy(zc, innvl, CRED()); spl_fstrans_unmark(cookie); } if (error != 0) goto out; /* legacy ioctls can modify zc_name */ saved_poolname = strdup(zc->zc_name); if (saved_poolname == NULL) { error = SET_ERROR(ENOMEM); goto out; } else { saved_poolname[strcspn(saved_poolname, "/@#")] = '\0'; } if (vec->zvec_func != NULL) { nvlist_t *outnvl; int puterror = 0; spa_t *spa; nvlist_t *lognv = NULL; ASSERT(vec->zvec_legacy_func == NULL); /* * Add the innvl to the lognv before calling the func, * in case the func changes the innvl. */ if (vec->zvec_allow_log) { lognv = fnvlist_alloc(); fnvlist_add_string(lognv, ZPOOL_HIST_IOCTL, vec->zvec_name); if (!nvlist_empty(innvl)) { fnvlist_add_nvlist(lognv, ZPOOL_HIST_INPUT_NVL, innvl); } } outnvl = fnvlist_alloc(); cookie = spl_fstrans_mark(); error = vec->zvec_func(zc->zc_name, innvl, outnvl); spl_fstrans_unmark(cookie); /* * Some commands can partially execute, modify state, and still * return an error. In these cases, attempt to record what * was modified. 
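 *
 * (Illustrative example, not in the original:) a channel program that
 * destroys several snapshots and then fails part-way has still changed
 * on-disk state, which is why the condition below logs the history
 * record, with ZPOOL_HIST_ERRNO attached, for ZFS_IOC_CHANNEL_PROGRAM
 * even when an error is returned.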
*/ if ((error == 0 || (cmd == ZFS_IOC_CHANNEL_PROGRAM && error != EINVAL)) && vec->zvec_allow_log && spa_open(zc->zc_name, &spa, FTAG) == 0) { if (!nvlist_empty(outnvl)) { fnvlist_add_nvlist(lognv, ZPOOL_HIST_OUTPUT_NVL, outnvl); } if (error != 0) { fnvlist_add_int64(lognv, ZPOOL_HIST_ERRNO, error); } (void) spa_history_log_nvl(spa, lognv); spa_close(spa, FTAG); } fnvlist_free(lognv); if (!nvlist_empty(outnvl) || zc->zc_nvlist_dst_size != 0) { int smusherror = 0; if (vec->zvec_smush_outnvlist) { smusherror = nvlist_smush(outnvl, zc->zc_nvlist_dst_size); } if (smusherror == 0) puterror = put_nvlist(zc, outnvl); } if (puterror != 0) error = puterror; nvlist_free(outnvl); } else { cookie = spl_fstrans_mark(); error = vec->zvec_legacy_func(zc); spl_fstrans_unmark(cookie); } out: nvlist_free(innvl); rc = ddi_copyout(zc, (void *)arg, sizeof (zfs_cmd_t), flag); if (error == 0 && rc != 0) error = SET_ERROR(EFAULT); if (error == 0 && vec->zvec_allow_log) { char *s = tsd_get(zfs_allow_log_key); if (s != NULL) strfree(s); (void) tsd_set(zfs_allow_log_key, saved_poolname); } else { if (saved_poolname != NULL) strfree(saved_poolname); } kmem_free(zc, sizeof (zfs_cmd_t)); return (-error); } #ifdef CONFIG_COMPAT static long zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg) { return (zfsdev_ioctl(filp, cmd, arg)); } #else #define zfsdev_compat_ioctl NULL #endif static const struct file_operations zfsdev_fops = { .open = zfsdev_open, .release = zfsdev_release, .unlocked_ioctl = zfsdev_ioctl, .compat_ioctl = zfsdev_compat_ioctl, .owner = THIS_MODULE, }; static struct miscdevice zfs_misc = { .minor = ZFS_DEVICE_MINOR, .name = ZFS_DRIVER, .fops = &zfsdev_fops, }; MODULE_ALIAS_MISCDEV(ZFS_DEVICE_MINOR); MODULE_ALIAS("devname:zfs"); static int zfs_attach(void) { int error; mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL); zfsdev_state_list = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP); zfsdev_state_list->zs_minor = -1; error = misc_register(&zfs_misc); if (error == -EBUSY) { /* * Fallback to dynamic minor allocation in the event of a * collision with a reserved minor in linux/miscdevice.h. * In this case the kernel modules must be manually loaded. 
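 *
 * (Editorial note, our reading:) manual loading is required because
 * the MODULE_ALIAS_MISCDEV/devname:zfs aliases declared above only
 * autoload the module for the static minor; with MISC_DYNAMIC_MINOR
 * an open of /dev/zfs can no longer trigger module autoloading.
 */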
*/ printk(KERN_INFO "ZFS: misc_register() with static minor %d " "failed %d, retrying with MISC_DYNAMIC_MINOR\n", ZFS_DEVICE_MINOR, error); zfs_misc.minor = MISC_DYNAMIC_MINOR; error = misc_register(&zfs_misc); } if (error) printk(KERN_INFO "ZFS: misc_register() failed %d\n", error); return (error); } static void zfs_detach(void) { zfsdev_state_t *zs, *zsprev = NULL; misc_deregister(&zfs_misc); mutex_destroy(&zfsdev_state_lock); for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) { if (zsprev) kmem_free(zsprev, sizeof (zfsdev_state_t)); zsprev = zs; } if (zsprev) kmem_free(zsprev, sizeof (zfsdev_state_t)); } static void zfs_allow_log_destroy(void *arg) { char *poolname = arg; if (poolname != NULL) strfree(poolname); } #ifdef DEBUG #define ZFS_DEBUG_STR " (DEBUG mode)" #else #define ZFS_DEBUG_STR "" #endif static int __init _init(void) { int error; error = -vn_set_pwd("/"); if (error) { printk(KERN_NOTICE "ZFS: Warning unable to set pwd to '/': %d\n", error); return (error); } if ((error = -zvol_init()) != 0) return (error); spa_init(FREAD | FWRITE); zfs_init(); zfs_ioctl_init(); zfs_sysfs_init(); if ((error = zfs_attach()) != 0) goto out; tsd_create(&zfs_fsyncer_key, NULL); tsd_create(&rrw_tsd_key, rrw_tsd_destroy); tsd_create(&zfs_allow_log_key, zfs_allow_log_destroy); printk(KERN_NOTICE "ZFS: Loaded module v%s-%s%s, " "ZFS pool version %s, ZFS filesystem version %s\n", ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR, SPA_VERSION_STRING, ZPL_VERSION_STRING); #ifndef CONFIG_FS_POSIX_ACL printk(KERN_NOTICE "ZFS: Posix ACLs disabled by kernel\n"); #endif /* CONFIG_FS_POSIX_ACL */ return (0); out: zfs_sysfs_fini(); zfs_fini(); spa_fini(); (void) zvol_fini(); printk(KERN_NOTICE "ZFS: Failed to Load ZFS Filesystem v%s-%s%s" ", rc = %d\n", ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR, error); return (error); } static void __exit _fini(void) { zfs_detach(); zfs_sysfs_fini(); zfs_fini(); spa_fini(); zvol_fini(); tsd_destroy(&zfs_fsyncer_key); tsd_destroy(&rrw_tsd_key); tsd_destroy(&zfs_allow_log_key); printk(KERN_NOTICE "ZFS: Unloaded module v%s-%s%s\n", ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR); } #if defined(_KERNEL) module_init(_init); module_exit(_fini); MODULE_DESCRIPTION("ZFS"); MODULE_AUTHOR(ZFS_META_AUTHOR); MODULE_LICENSE(ZFS_META_LICENSE); MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE); #endif diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index d3ecf6274770..8ab5e7033702 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -1,900 +1,901 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. 
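#
# Editorial note (illustrative): each [tests/functional/...] section below
# names a directory of test scripts; its 'tests' list enumerates the
# scripts to run and its 'tags' list lets the test runner select groups
# of suites. The [DEFAULT] section supplies the settings (setup/cleanup
# hooks, users, timeout) inherited by every section.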
# [DEFAULT] pre = setup quiet = False pre_user = root user = root timeout = 600 post_user = root post = cleanup outputdir = /var/tmp/test_results tags = ['functional'] [tests/functional/acl/posix] tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos'] tags = ['functional', 'acl', 'posix'] [tests/functional/alloc_class] tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos', 'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos', 'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos', 'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos', 'alloc_class_013_pos'] tags = ['functional', 'alloc_class'] [tests/functional/arc] tests = ['dbufstats_001_pos', 'dbufstats_002_pos'] tags = ['functional', 'arc'] [tests/functional/atime] tests = ['atime_001_pos', 'atime_002_neg', 'atime_003_pos'] tags = ['functional', 'atime'] [tests/functional/bootfs] tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos', 'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos', 'bootfs_008_pos'] tags = ['functional', 'bootfs'] [tests/functional/cache] tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg', 'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg', 'cache_009_pos', 'cache_010_neg', 'cache_011_pos'] tags = ['functional', 'cache'] [tests/functional/cachefile] tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos', 'cachefile_004_pos'] tags = ['functional', 'cachefile'] [tests/functional/casenorm] tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure', 'sensitive_none_lookup', 'sensitive_none_delete', 'sensitive_formd_lookup', 'sensitive_formd_delete', 'insensitive_none_lookup', 'insensitive_none_delete', 'insensitive_formd_lookup', 'insensitive_formd_delete', 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete', 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete'] tags = ['functional', 'casenorm'] [tests/functional/channel_program/lua_core] tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists', 'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg', 'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries', 'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua', 'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large', 'tst.return_nvlist_neg', 'tst.return_nvlist_pos', 'tst.return_recursive_table', 'tst.timeout'] tags = ['functional', 'channel_program', 'lua_core'] [tests/functional/channel_program/synctask_core] tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit', 'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg', 'tst.get_number_props', 'tst.get_string_props', 'tst.get_type', 'tst.get_userquota', 'tst.get_written', 'tst.list_children', 'tst.list_clones', 'tst.list_snapshots', 'tst.list_system_props', 'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict', 'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult', 'tst.rollback_one', 'tst.snapshot_destroy', 'tst.snapshot_neg', 'tst.snapshot_recursive', 'tst.snapshot_simple'] tags = ['functional', 'channel_program', 'synctask_core'] [tests/functional/chattr] tests = ['chattr_001_pos', 'chattr_002_neg'] tags = ['functional', 'chattr'] [tests/functional/checksum] tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'filetest_001_pos'] tags = ['functional', 'checksum'] [tests/functional/clean_mirror] tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos', 
'clean_mirror_003_pos', 'clean_mirror_004_pos'] tags = ['functional', 'clean_mirror'] [tests/functional/cli_root/zdb] tests = ['zdb_001_neg', 'zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos', 'zdb_006_pos'] pre = post = tags = ['functional', 'cli_root', 'zdb'] [tests/functional/cli_root/zfs] tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg'] tags = ['functional', 'cli_root', 'zfs'] [tests/functional/cli_root/zfs_bookmark] tests = ['zfs_bookmark_cliargs'] tags = ['functional', 'cli_root', 'zfs_bookmark'] [tests/functional/cli_root/zfs_change-key] tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format', 'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location', 'zfs_change-key_pbkdf2iters'] tags = ['functional', 'cli_root', 'zfs_change-key'] [tests/functional/cli_root/zfs_clone] tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos', 'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos', 'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg', 'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested'] tags = ['functional', 'cli_root', 'zfs_clone'] [tests/functional/cli_root/zfs_copies] tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos', 'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos'] tags = ['functional', 'cli_root', 'zfs_copies'] [tests/functional/cli_root/zfs_create] tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos', 'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos', 'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg', 'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos', 'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted', 'zfs_create_crypt_combos'] tags = ['functional', 'cli_root', 'zfs_create'] [tests/functional/cli_root/zfs_destroy] tests = ['zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos', 'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg', 'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos', 'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos', 'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos', 'zfs_destroy_016_pos'] tags = ['functional', 'cli_root', 'zfs_destroy'] [tests/functional/cli_root/zfs_diff] tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp', 'zfs_diff_types', 'zfs_diff_encrypted'] tags = ['functional', 'cli_root', 'zfs_diff'] [tests/functional/cli_root/zfs_get] tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos', 'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg', 'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg'] tags = ['functional', 'cli_root', 'zfs_get'] [tests/functional/cli_root/zfs_inherit] tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos', 'zfs_inherit_mountpoint'] tags = ['functional', 'cli_root', 'zfs_inherit'] [tests/functional/cli_root/zfs_load-key] tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file', 'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive'] tags = ['functional', 'cli_root', 'zfs_load-key'] [tests/functional/cli_root/zfs_mount] tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos', 'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_006_pos', 'zfs_mount_007_pos', 'zfs_mount_008_pos', 'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg', 'zfs_mount_012_neg', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted', 
'zfs_mount_remount', 'zfs_multi_mount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints'] tags = ['functional', 'cli_root', 'zfs_mount'] [tests/functional/cli_root/zfs_program] tests = ['zfs_program_json'] tags = ['functional', 'cli_root', 'zfs_program'] [tests/functional/cli_root/zfs_promote] tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos', 'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg', 'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot'] tags = ['functional', 'cli_root', 'zfs_promote'] [tests/functional/cli_root/zfs_property] tests = ['zfs_written_property_001_pos'] tags = ['functional', 'cli_root', 'zfs_property'] [tests/functional/cli_root/zfs_receive] tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos', 'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos', 'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg', 'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos', 'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos', 'receive-o-x_props_override', 'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted', 'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e'] tags = ['functional', 'cli_root', 'zfs_receive'] [tests/functional/cli_root/zfs_remap] tests = ['zfs_remap_cliargs', 'zfs_remap_obsolete_counts'] tags = ['functional', 'cli_root', 'zfs_remap'] [tests/functional/cli_root/zfs_rename] tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos', 'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos', 'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg', 'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg', 'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child', 'zfs_rename_to_encrypted', 'zfs_rename_mountpoint'] tags = ['functional', 'cli_root', 'zfs_rename'] [tests/functional/cli_root/zfs_reservation] tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos'] tags = ['functional', 'cli_root', 'zfs_reservation'] [tests/functional/cli_root/zfs_rollback] tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos', 'zfs_rollback_003_neg', 'zfs_rollback_004_neg'] tags = ['functional', 'cli_root', 'zfs_rollback'] [tests/functional/cli_root/zfs_send] tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos', 'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos', 'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw', 'zfs_send_sparse', 'zfs_send-b'] tags = ['functional', 'cli_root', 'zfs_send'] [tests/functional/cli_root/zfs_set] tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos', 'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos', 'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos', 'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos', 'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos', 'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos', 'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg', 'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos', 'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation'] tags = ['functional', 'cli_root', 'zfs_set'] [tests/functional/cli_root/zfs_share] tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos', 'zfs_share_004_pos', 'zfs_share_005_pos', 'zfs_share_006_pos', 'zfs_share_007_neg', 'zfs_share_008_neg', 'zfs_share_009_neg', 'zfs_share_010_neg', 'zfs_share_011_pos'] tags = ['functional', 
'cli_root', 'zfs_share'] [tests/functional/cli_root/zfs_snapshot] tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg', 'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg', 'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg', 'zfs_snapshot_009_pos'] tags = ['functional', 'cli_root', 'zfs_snapshot'] [tests/functional/cli_root/zfs_sysfs] tests = ['zfeature_set_unsupported.ksh', 'zfs_get_unsupported', 'zfs_set_unsupported', 'zfs_sysfs_live.ksh', 'zpool_get_unsupported', 'zpool_set_unsupported'] tags = ['functional', 'cli_root', 'zfs_sysfs'] [tests/functional/cli_root/zfs_unload-key] tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive'] tags = ['functional', 'cli_root', 'zfs_unload-key'] [tests/functional/cli_root/zfs_unmount] tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos', 'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos', 'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos', 'zfs_unmount_all_001_pos', 'zfs_unmount_nested'] tags = ['functional', 'cli_root', 'zfs_unmount'] [tests/functional/cli_root/zfs_unshare] tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos', 'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos', 'zfs_unshare_007_pos'] tags = ['functional', 'cli_root', 'zfs_unshare'] [tests/functional/cli_root/zfs_upgrade] tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos', 'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg', 'zfs_upgrade_007_neg'] tags = ['functional', 'cli_root', 'zfs_upgrade'] [tests/functional/cli_root/zpool] tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos'] tags = ['functional', 'cli_root', 'zpool'] [tests/functional/cli_root/zpool_add] tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos', 'zpool_add_004_pos', 'zpool_add_005_pos', 'zpool_add_006_pos', 'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos', 'add-o_ashift', 'add_prop_ashift', 'add_nested_replacing_spare'] tags = ['functional', 'cli_root', 'zpool_add'] [tests/functional/cli_root/zpool_attach] tests = ['zpool_attach_001_neg', 'attach-o_ashift'] tags = ['functional', 'cli_root', 'zpool_attach'] [tests/functional/cli_root/zpool_clear] tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg', 'zpool_clear_readonly'] tags = ['functional', 'cli_root', 'zpool_clear'] [tests/functional/cli_root/zpool_create] tests = ['zpool_create_001_pos', 'zpool_create_002_pos', 'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos', 'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos', 'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg', 'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg', 'zpool_create_016_pos', 'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos', 'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos', 'zpool_create_023_neg', 'zpool_create_024_pos', 'zpool_create_encrypted', 'zpool_create_crypt_combos', 'zpool_create_features_001_pos', 'zpool_create_features_002_pos', 'zpool_create_features_003_pos', 'zpool_create_features_004_neg', 'zpool_create_features_005_pos', 'create-o_ashift', 'zpool_create_tempname'] tags = ['functional', 'cli_root', 'zpool_create'] [tests/functional/cli_root/zpool_destroy] tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos', 'zpool_destroy_003_neg'] pre = post = tags = ['functional', 
'cli_root', 'zpool_destroy'] [tests/functional/cli_root/zpool_detach] tests = ['zpool_detach_001_neg'] tags = ['functional', 'cli_root', 'zpool_detach'] [tests/functional/cli_root/zpool_events] tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow', 'zpool_events_poolname'] tags = ['functional', 'cli_root', 'zpool_events'] [tests/functional/cli_root/zpool_expand] tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos', 'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos'] tags = ['functional', 'cli_root', 'zpool_expand'] [tests/functional/cli_root/zpool_export] tests = ['zpool_export_001_pos', 'zpool_export_002_pos', 'zpool_export_003_neg', 'zpool_export_004_pos'] tags = ['functional', 'cli_root', 'zpool_export'] [tests/functional/cli_root/zpool_get] tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos', 'zpool_get_004_neg'] tags = ['functional', 'cli_root', 'zpool_get'] [tests/functional/cli_root/zpool_history] tests = ['zpool_history_001_neg', 'zpool_history_002_pos'] tags = ['functional', 'cli_root', 'zpool_history'] [tests/functional/cli_root/zpool_import] tests = ['zpool_import_001_pos', 'zpool_import_002_pos', 'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos', 'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos', 'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg', 'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos', 'zpool_import_015_pos', 'zpool_import_features_001_pos', 'zpool_import_features_002_neg', 'zpool_import_features_003_pos', 'zpool_import_missing_001_pos', 'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos', 'zpool_import_rename_001_pos', 'zpool_import_all_001_pos', 'zpool_import_encrypted', 'zpool_import_encrypted_load', 'zpool_import_errata3', 'import_cachefile_device_added', 'import_cachefile_device_removed', 'import_cachefile_device_replaced', 'import_cachefile_mirror_attached', 'import_cachefile_mirror_detached', 'import_cachefile_shared_device', 'import_devices_missing', 'import_paths_changed', 'import_rewind_config_changed', 'import_rewind_device_replaced'] tags = ['functional', 'cli_root', 'zpool_import'] [tests/functional/cli_root/zpool_labelclear] tests = ['zpool_labelclear_active', 'zpool_labelclear_exported'] pre = post = tags = ['functional', 'cli_root', 'zpool_labelclear'] [tests/functional/cli_root/zpool_initialize] tests = ['zpool_initialize_attach_detach_add_remove', 'zpool_initialize_import_export', 'zpool_initialize_offline_export_import_online', 'zpool_initialize_online_offline', 'zpool_initialize_split', 'zpool_initialize_start_and_cancel_neg', 'zpool_initialize_start_and_cancel_pos', 'zpool_initialize_suspend_resume', 'zpool_initialize_unsupported_vdevs', 'zpool_initialize_verify_checksums', 'zpool_initialize_verify_initialized'] pre = tags = ['functional', 'cli_root', 'zpool_initialize'] [tests/functional/cli_root/zpool_offline] tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg', 'zpool_offline_003_pos'] tags = ['functional', 'cli_root', 'zpool_offline'] [tests/functional/cli_root/zpool_online] tests = ['zpool_online_001_pos', 'zpool_online_002_neg'] tags = ['functional', 'cli_root', 'zpool_online'] [tests/functional/cli_root/zpool_remove] tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos', 'zpool_remove_003_pos'] tags = ['functional', 'cli_root', 'zpool_remove'] [tests/functional/cli_root/zpool_reopen] tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos', 'zpool_reopen_003_pos', 
'zpool_reopen_004_pos', 'zpool_reopen_005_pos', 'zpool_reopen_006_neg', 'zpool_reopen_007_pos'] tags = ['functional', 'cli_root', 'zpool_reopen'] [tests/functional/cli_root/zpool_replace] tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift'] tags = ['functional', 'cli_root', 'zpool_replace'] [tests/functional/cli_root/zpool_resilver] tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart'] tags = ['functional', 'cli_root', 'zpool_resilver'] [tests/functional/cli_root/zpool_scrub] tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos', 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing', 'zpool_scrub_offline_device'] tags = ['functional', 'cli_root', 'zpool_scrub'] [tests/functional/cli_root/zpool_set] tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg', 'zpool_set_ashift', 'zpool_set_features'] tags = ['functional', 'cli_root', 'zpool_set'] [tests/functional/cli_root/zpool_split] tests = ['zpool_split_cliargs', 'zpool_split_devices', 'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs', 'zpool_split_resilver', 'zpool_split_wholedisk'] tags = ['functional', 'cli_root', 'zpool_split'] [tests/functional/cli_root/zpool_status] tests = ['zpool_status_001_pos', 'zpool_status_002_pos','zpool_status_003_pos', 'zpool_status_-c_disable', 'zpool_status_-c_homedir', 'zpool_status_-c_searchpath'] user = tags = ['functional', 'cli_root', 'zpool_status'] [tests/functional/cli_root/zpool_sync] tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg'] tags = ['functional', 'cli_root', 'zpool_sync'] [tests/functional/cli_root/zpool_upgrade] tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos', 'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos', 'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg', 'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos', 'zpool_upgrade_009_neg'] tags = ['functional', 'cli_root', 'zpool_upgrade'] [tests/functional/cli_user/misc] tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', 'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg', 'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg', 'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg', 'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg', 'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg', 'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg', 'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg', 'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg', 'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg', 'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg', 'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg', 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos', 'arc_summary_001_pos', 'arc_summary_002_neg', 'dbufstat_001_pos'] user = tags = ['functional', 'cli_user', 'misc'] [tests/functional/cli_user/zfs_list] tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos', 'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg'] user = tags = ['functional', 'cli_user', 'zfs_list'] [tests/functional/cli_user/zpool_iostat] tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos', 'zpool_iostat_003_neg', 'zpool_iostat_004_pos', 'zpool_iostat_005_pos', 'zpool_iostat_-c_disable', 'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath'] user = tags = 
['functional', 'cli_user', 'zpool_iostat'] [tests/functional/cli_user/zpool_list] tests = ['zpool_list_001_pos', 'zpool_list_002_neg'] user = tags = ['functional', 'cli_user', 'zpool_list'] [tests/functional/compression] tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos', 'compress_004_pos'] tags = ['functional', 'compression'] [tests/functional/cp_files] tests = ['cp_files_001_pos'] tags = ['functional', 'cp_files'] [tests/functional/ctime] tests = ['ctime_001_pos' ] tags = ['functional', 'ctime'] [tests/functional/deadman] tests = ['deadman_sync', 'deadman_zio'] pre = post = tags = ['functional', 'deadman'] [tests/functional/delegate] tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos', 'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos', 'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg', 'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg', 'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos', 'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos', 'zfs_unallow_007_neg', 'zfs_unallow_008_neg'] tags = ['functional', 'delegate'] [tests/functional/devices] tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos'] tags = ['functional', 'devices'] [tests/functional/events] tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter'] tags = ['functional', 'events'] [tests/functional/exec] tests = ['exec_001_pos', 'exec_002_neg'] tags = ['functional', 'exec'] [tests/functional/fault] tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_replace_001_pos', 'auto_spare_001_pos', 'auto_spare_002_pos', 'auto_spare_ashift', 'auto_spare_multiple', 'auto_spare_shared', 'scrub_after_resilver', 'decrypt_fault', 'decompress_fault', 'zpool_status_-s'] tags = ['functional', 'fault'] [tests/functional/features/async_destroy] tests = ['async_destroy_001_pos'] tags = ['functional', 'features', 'async_destroy'] [tests/functional/features/large_dnode] tests = ['large_dnode_001_pos', 'large_dnode_002_pos', 'large_dnode_003_pos', 'large_dnode_004_neg', 'large_dnode_005_pos', 'large_dnode_006_pos', 'large_dnode_007_neg', 'large_dnode_008_pos', 'large_dnode_009_pos'] tags = ['functional', 'features', 'large_dnode'] [tests/functional/grow] pre = post = tests = ['grow_pool_001_pos', 'grow_replicas_001_pos'] tags = ['functional', 'grow'] [tests/functional/history] tests = ['history_001_pos', 'history_002_pos', 'history_003_pos', 'history_004_pos', 'history_005_neg', 'history_006_neg', 'history_007_pos', 'history_008_pos', 'history_009_pos', 'history_010_pos'] tags = ['functional', 'history'] [tests/functional/hkdf] tests = ['run_hkdf_test'] tags = ['functional', 'hkdf'] [tests/functional/inheritance] tests = ['inherit_001_pos'] pre = tags = ['functional', 'inheritance'] [tests/functional/io] tests = ['sync', 'psync', 'libaio', 'posixaio', 'mmap'] tags = ['functional', 'io'] [tests/functional/inuse] tests = ['inuse_001_pos', 'inuse_003_pos', 'inuse_004_pos', 'inuse_005_pos', 'inuse_006_pos', 'inuse_007_pos', 'inuse_008_pos', 'inuse_009_pos'] post = tags = ['functional', 'inuse'] [tests/functional/large_files] tests = ['large_files_001_pos', 'large_files_002_pos'] tags = ['functional', 'large_files'] [tests/functional/largest_pool] tests = ['largest_pool_001_pos'] pre = post = tags = ['functional', 'largest_pool'] [tests/functional/limits] tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count', 'snapshot_limit'] tags = ['functional', 'limits'] [tests/functional/link_count] tests = ['link_count_001'] 
tags = ['functional', 'link_count'] [tests/functional/migration] tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos', 'migration_004_pos', 'migration_005_pos', 'migration_006_pos', 'migration_007_pos', 'migration_008_pos', 'migration_009_pos', 'migration_010_pos', 'migration_011_pos', 'migration_012_pos'] tags = ['functional', 'migration'] [tests/functional/mmap] tests = ['mmap_write_001_pos', 'mmap_read_001_pos', 'mmap_libaio_001_pos'] tags = ['functional', 'mmap'] [tests/functional/mmp] tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval', 'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import', 'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history', 'mmp_on_zdb'] tags = ['functional', 'mmp'] [tests/functional/mount] tests = ['umount_001', 'umountall_001'] tags = ['functional', 'mount'] [tests/functional/mv_files] tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation'] tags = ['functional', 'mv_files'] [tests/functional/nestedfs] tests = ['nestedfs_001_pos'] tags = ['functional', 'nestedfs'] [tests/functional/no_space] tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos', 'enospc_df'] tags = ['functional', 'no_space'] [tests/functional/nopwrite] tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative', 'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync', 'nopwrite_varying_compression', 'nopwrite_volume'] tags = ['functional', 'nopwrite'] [tests/functional/online_offline] tests = ['online_offline_001_pos', 'online_offline_002_neg', 'online_offline_003_neg'] tags = ['functional', 'online_offline'] [tests/functional/pool_checkpoint] tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind', 'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard', 'checkpoint_discard_busy', 'checkpoint_discard_many', 'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz', 'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind', 'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice', 'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat'] tags = ['functional', 'pool_checkpoint'] timeout = 1800 [tests/functional/pool_names] tests = ['pool_names_001_pos', 'pool_names_002_neg'] pre = post = tags = ['functional', 'pool_names'] [tests/functional/poolversion] tests = ['poolversion_001_pos', 'poolversion_002_pos'] tags = ['functional', 'poolversion'] [tests/functional/privilege] tests = ['privilege_001_pos', 'privilege_002_pos'] tags = ['functional', 'privilege'] [tests/functional/procfs] tests = ['procfs_list_basic', 'procfs_list_concurrent_readers', 'procfs_list_stale_read', 'pool_state'] tags = ['functional', 'procfs'] [tests/functional/projectquota] tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos', 'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos', 'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos', 'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos', 'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos', 'projectspace_004_pos', 'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg' ] tags = ['functional', 'projectquota'] [tests/functional/pyzfs] tests = ['pyzfs_unittest'] pre = post = tags = ['functional', 'pyzfs'] [tests/functional/quota] tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos', 'quota_004_pos', 'quota_005_pos', 'quota_006_neg'] tags = ['functional', 'quota'] [tests/functional/raidz] tests = ['raidz_001_neg', 'raidz_002_pos'] 
tags = ['functional', 'raidz'] [tests/functional/redundancy] tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos', 'redundancy_004_neg'] tags = ['functional', 'redundancy'] [tests/functional/refquota] tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos', 'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg'] tags = ['functional', 'refquota'] [tests/functional/refreserv] tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos', 'refreserv_004_pos', 'refreserv_005_pos'] tags = ['functional', 'refreserv'] [tests/functional/removal] pre = tests = ['removal_all_vdev', 'removal_check_space', 'removal_condense_export', 'removal_multiple_indirection', 'removal_remap', 'removal_remap_deadlists', 'removal_resume_export', 'removal_sanity', 'removal_with_add', 'removal_with_create_fs', 'removal_with_dedup', 'removal_with_errors', 'removal_with_export', 'removal_with_ganging', 'removal_with_faulted', 'removal_with_remap', 'removal_with_remove', 'removal_with_scrub', 'removal_with_send', 'removal_with_send_recv', 'removal_with_snapshot', 'removal_with_write', 'removal_with_zdb', 'remove_expanded', 'remove_mirror', 'remove_mirror_sanity', 'remove_raidz'] tags = ['functional', 'removal'] [tests/functional/rename_dirs] tests = ['rename_dirs_001_pos'] tags = ['functional', 'rename_dirs'] [tests/functional/replacement] tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos'] tags = ['functional', 'replacement'] [tests/functional/reservation] tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos', 'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos', 'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos', 'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos', 'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos', 'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos', 'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg', 'reservation_022_pos'] tags = ['functional', 'reservation'] [tests/functional/rootpool] tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos'] tags = ['functional', 'rootpool'] [tests/functional/rsend] tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos', 'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos', 'rsend_014_pos', 'rsend_019_pos', 'rsend_020_pos', 'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos', 'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props', 'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump', 'send-c_lz4_disabled', 'send-c_recv_lz4_disabled', 'send-c_mixed_compression', 'send-c_stream_size_estimate', 'send-cD', 'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize', 'send-c_recv_dedup', 'send_encrypted_files', 'send_encrypted_heirarchy', 'send_encrypted_props', 'send_freeobjects', 'send_realloc_dnode_size', 'send_hole_birth', 'send-wDR_encrypted_zvol'] tags = ['functional', 'rsend'] [tests/functional/scrub_mirror] tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos', 'scrub_mirror_003_pos', 'scrub_mirror_004_pos'] tags = ['functional', 'scrub_mirror'] [tests/functional/slog] tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos', 'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg', 'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg', 'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 
'slog_replay_fs', 'slog_replay_volume'] tags = ['functional', 'slog'] [tests/functional/snapshot] tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos', 'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos', 'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos', 'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos', 'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos', 'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos', 'snapshot_015_pos', 'snapshot_016_pos', 'snapshot_017_pos'] tags = ['functional', 'snapshot'] [tests/functional/snapused] tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos', 'snapused_004_pos', 'snapused_005_pos'] tags = ['functional', 'snapused'] [tests/functional/sparse] tests = ['sparse_001_pos'] tags = ['functional', 'sparse'] [tests/functional/threadsappend] tests = ['threadsappend_001_pos'] tags = ['functional', 'threadsappend'] [tests/functional/tmpfile] tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos'] tags = ['functional', 'tmpfile'] [tests/functional/truncate] tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps'] tags = ['functional', 'truncate'] [tests/functional/upgrade] tests = ['upgrade_userobj_001_pos', 'upgrade_projectquota_001_pos'] tags = ['functional', 'upgrade'] [tests/functional/user_namespace] tests = ['user_namespace_001'] tags = ['functional', 'user_namespace'] [tests/functional/userquota] tests = [ 'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos', 'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos', 'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos', 'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg', 'userquota_013_pos', 'userspace_001_pos', 'userspace_002_pos', 'userspace_003_pos', 'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ] tags = ['functional', 'userquota'] [tests/functional/vdev_zaps] tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos', 'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos', 'vdev_zaps_007_pos'] tags = ['functional', 'vdev_zaps'] [tests/functional/write_dirs] tests = ['write_dirs_001_pos', 'write_dirs_002_pos'] tags = ['functional', 'write_dirs'] [tests/functional/xattr] tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos', 'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos', 'xattr_009_neg', 'xattr_010_neg', 'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos'] tags = ['functional', 'xattr'] [tests/functional/zvol/zvol_ENOSPC] tests = ['zvol_ENOSPC_001_pos'] tags = ['functional', 'zvol', 'zvol_ENOSPC'] [tests/functional/zvol/zvol_cli] tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg'] tags = ['functional', 'zvol', 'zvol_cli'] [tests/functional/zvol/zvol_misc] tests = ['zvol_misc_001_neg', 'zvol_misc_002_pos', 'zvol_misc_003_neg', 'zvol_misc_004_pos', 'zvol_misc_005_neg', 'zvol_misc_006_pos', - 'zvol_misc_snapdev', 'zvol_misc_volmode', 'zvol_misc_zil'] + 'zvol_misc_hierarchy', 'zvol_misc_snapdev', 'zvol_misc_volmode', + 'zvol_misc_zil'] tags = ['functional', 'zvol', 'zvol_misc'] [tests/functional/zvol/zvol_swap] tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_003_pos', 'zvol_swap_004_pos', 'zvol_swap_005_pos', 'zvol_swap_006_pos'] tags = ['functional', 'zvol', 'zvol_swap'] [tests/functional/libzfs] tests = ['many_fds', 'libzfs_input'] tags = ['functional', 'libzfs'] diff --git a/tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile.am 
b/tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile.am index a2c95a3ebb72..57ffbd565e66 100644 --- a/tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile.am +++ b/tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile.am @@ -1,17 +1,18 @@ pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/zvol/zvol_misc dist_pkgdata_SCRIPTS = \ cleanup.ksh \ setup.ksh \ zvol_misc_001_neg.ksh \ zvol_misc_002_pos.ksh \ zvol_misc_003_neg.ksh \ zvol_misc_004_pos.ksh \ zvol_misc_005_neg.ksh \ zvol_misc_006_pos.ksh \ + zvol_misc_hierarchy.ksh \ zvol_misc_snapdev.ksh \ zvol_misc_volmode.ksh \ zvol_misc_zil.ksh dist_pkgdata_DATA = \ zvol_misc_common.kshlib diff --git a/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh b/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh new file mode 100755 index 000000000000..1431f0b1f1f0 --- /dev/null +++ b/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh @@ -0,0 +1,93 @@ +#!/bin/ksh -p +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright 2018, loli10K. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib + +# +# DESCRIPTION: +# ZVOLs cannot have children datasets: verify zfs commands respect this +# hierarchy rule. +# +# STRATEGY: +# 1. Create a filesystem and a ZVOL +# 2. Verify 'zfs recv' will not (force-)receive a ZVOL over the root dataset +# 3. Verify 'zfs recv' cannot receive a ZVOL overwriting datasets with children +# 4. Verify 'zfs recv' cannot receive datasets below a ZVOL +# 5. Verify 'zfs create' cannot create datasets under a ZVOL +# 6. Verify 'zfs rename' cannot move datasets under a ZVOL +# + +verify_runnable "both" + +function cleanup +{ + destroy_pool "$poolname" + log_must rm -f "$vdevfile" "$streamfile_fs" "$streamfile_zvol" +} + +log_assert "ZVOLs cannot have children datasets: verify zfs commands respect "\ + "this hierarchy rule" +log_onexit cleanup + +poolname="$TESTPOOL-zvol_hierarchy" +vdevfile="$TEST_BASE_DIR/vdevfile.$$" +streamfile_fs="$TEST_BASE_DIR/streamfile_fs.$$" +streamfile_zvol="$TEST_BASE_DIR/streamfile_zvol.$$" + +# 1. Create filesystems and ZVOLs +# NOTE: set "mountpoint=none" just to speed up the test process +log_must truncate -s $MINVDEVSIZE "$vdevfile" +log_must zpool create -O mountpoint=none "$poolname" "$vdevfile" +log_must zfs create "$poolname/sendfs" +log_must zfs create -V 1M -s "$poolname/sendvol" +log_must zfs snapshot "$poolname/sendfs@snap" +log_must zfs snapshot "$poolname/sendvol@snap" +log_must eval "zfs send $poolname/sendfs@snap > $streamfile_fs" +log_must eval "zfs send $poolname/sendvol@snap > $streamfile_zvol" + +# 2. Verify 'zfs recv' will not (force-)receive a ZVOL over the root dataset +log_mustnot eval "zfs receive -F $poolname < $streamfile_zvol" + +# 3. Verify 'zfs recv' cannot receive a ZVOL overwriting datasets with children +log_must zfs create "$poolname/fs" +log_must zfs create "$poolname/fs/subfs" +log_mustnot eval "zfs receive -F $poolname/fs < $streamfile_zvol" +log_must zfs destroy "$poolname/fs/subfs" +log_must eval "zfs receive -F $poolname/fs < $streamfile_zvol" + +# 4.
Verify 'zfs recv' cannot receive datasets below a ZVOL +log_must zfs create -V 1M -s "$poolname/volume" +log_mustnot eval "zfs receive $poolname/volume/subfs < $streamfile_fs" +log_mustnot eval "zfs receive $poolname/volume/subvol < $streamfile_zvol" + +# 5. Verify 'zfs create' cannot create datasets under a ZVOL +log_must zfs create -V 1M -s "$poolname/createvol" +log_mustnot zfs create "$poolname/createvol/fs" +log_mustnot zfs create -V 1M -s "$poolname/createvol/vol" + +# 6. Verify 'zfs rename' cannot move datasets under a ZVOL +log_must zfs create "$poolname/movefs" +log_must zfs create -V 1M -s "$poolname/movevol" +log_must zfs create -V 1M -s "$poolname/renamevol" +log_mustnot zfs rename "$poolname/movefs" "$poolname/renamevol/fs" +log_mustnot zfs rename "$poolname/movevol" "$poolname/renamevol/vol" + +log_pass "ZVOLs cannot have children datasets and zfs commands enforce this "\ +"rule"
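
The hierarchy rule this test exercises is also visible from user space through the Python bindings. The snippet below is a minimal sketch, not part of the patch: it assumes a kernel module with this change applied, the matching pyzfs package, root privileges, and a hypothetical existing pool named 'tank'; the dataset names are likewise made up for illustration.

from __future__ import absolute_import, division, print_function

import libzfs_core
from libzfs_core import exceptions as lzc_exc

# Create a 1 MiB ZVOL to act as the (invalid) parent; 'tank' must be an
# existing, imported pool.
libzfs_core.lzc_create(b"tank/parentvol", ds_type='zvol',
                       props={b"volsize": 1024 * 1024})

try:
    # With this change the kernel refuses to create children below a
    # ZVOL, and the bindings surface that as the dedicated WrongParent
    # exception rather than a generic EINVAL-derived error.
    libzfs_core.lzc_create(b"tank/parentvol/child")
except lzc_exc.WrongParent:
    print("rejected, as expected: ZVOLs cannot have children datasets")

The same rejection is what the ksh test above asserts with log_mustnot for 'zfs create', 'zfs receive', and 'zfs rename'.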