diff --git a/config/kernel-blk-queue.m4 b/config/kernel-blk-queue.m4
index 2f0b386e6637..a064140f337a 100644
--- a/config/kernel-blk-queue.m4
+++ b/config/kernel-blk-queue.m4
@@ -1,433 +1,461 @@
 dnl #
 dnl # 2.6.39 API change,
 dnl # blk_start_plug() and blk_finish_plug()
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG], [
 	ZFS_LINUX_TEST_SRC([blk_plug], [
 		#include <linux/blkdev.h>
 	],[
 		struct blk_plug plug __attribute__ ((unused));
 
 		blk_start_plug(&plug);
 		blk_finish_plug(&plug);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_PLUG], [
 	AC_MSG_CHECKING([whether struct blk_plug is available])
 	ZFS_LINUX_TEST_RESULT([blk_plug], [
 		AC_MSG_RESULT(yes)
 	],[
 		ZFS_LINUX_TEST_ERROR([blk_plug])
 	])
 ])
 
 dnl #
 dnl # 2.6.32 - 4.11: statically allocated bdi in request_queue
 dnl # 4.12: dynamically allocated bdi in request_queue
+dnl # 6.11: bdi no longer available through request_queue, so get it from
+dnl # the gendisk attached to the queue
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI], [
 	ZFS_LINUX_TEST_SRC([blk_queue_bdi], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue q;
 		struct backing_dev_info bdi;
 		q.backing_dev_info = &bdi;
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_BDI], [
 	AC_MSG_CHECKING([whether blk_queue bdi is dynamic])
 	ZFS_LINUX_TEST_RESULT([blk_queue_bdi], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_BDI_DYNAMIC, 1,
 		    [blk queue backing_dev_info is dynamic])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISK_BDI], [
+	ZFS_LINUX_TEST_SRC([blk_queue_disk_bdi], [
+		#include <linux/blkdev.h>
+		#include <linux/backing-dev.h>
+	], [
+		struct request_queue q;
+		struct gendisk disk;
+		struct backing_dev_info bdi __attribute__ ((unused));
+		q.disk = &disk;
+		q.disk->bdi = &bdi;
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_DISK_BDI], [
+	AC_MSG_CHECKING([whether backing_dev_info is available through queue gendisk])
+	ZFS_LINUX_TEST_RESULT([blk_queue_disk_bdi], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_BLK_QUEUE_DISK_BDI, 1,
+		    [backing_dev_info is available through queue gendisk])
+	],[
+		AC_MSG_RESULT(no)
+	])
+])
+
 dnl #
 dnl # 5.9: added blk_queue_update_readahead(),
 dnl # 5.15: renamed to disk_update_readahead()
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD], [
 	ZFS_LINUX_TEST_SRC([blk_queue_update_readahead], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue q;
 		blk_queue_update_readahead(&q);
 	])
 
 	ZFS_LINUX_TEST_SRC([disk_update_readahead], [
 		#include <linux/blkdev.h>
 	],[
 		struct gendisk disk;
 		disk_update_readahead(&disk);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD], [
 	AC_MSG_CHECKING([whether blk_queue_update_readahead() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_update_readahead], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_UPDATE_READAHEAD, 1,
 		    [blk_queue_update_readahead() exists])
 	],[
 		AC_MSG_RESULT(no)
 		AC_MSG_CHECKING([whether disk_update_readahead() exists])
 		ZFS_LINUX_TEST_RESULT([disk_update_readahead], [
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_DISK_UPDATE_READAHEAD, 1,
 			    [disk_update_readahead() exists])
 		],[
 			AC_MSG_RESULT(no)
 		])
 	])
 ])
 
 dnl #
 dnl # 5.19: bdev_max_discard_sectors() available
 dnl # 2.6.32: blk_queue_discard() available
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD], [
 	ZFS_LINUX_TEST_SRC([bdev_max_discard_sectors], [
 		#include <linux/blkdev.h>
 	],[
 		struct block_device *bdev __attribute__ ((unused)) = NULL;
 		unsigned int error __attribute__ ((unused));
 
 		error = bdev_max_discard_sectors(bdev);
 	])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_discard], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue r;
 		struct request_queue *q = &r;
 		int value __attribute__ ((unused));
 		memset(q, 0, sizeof(r));
 		value = blk_queue_discard(q);
 	],[-Wframe-larger-than=8192])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_DISCARD], [
 	AC_MSG_CHECKING([whether bdev_max_discard_sectors() is available])
 	ZFS_LINUX_TEST_RESULT([bdev_max_discard_sectors], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BDEV_MAX_DISCARD_SECTORS, 1,
 		    [bdev_max_discard_sectors() is available])
 	],[
 		AC_MSG_RESULT(no)
 		AC_MSG_CHECKING([whether blk_queue_discard() is available])
 		ZFS_LINUX_TEST_RESULT([blk_queue_discard], [
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_DISCARD, 1,
 			    [blk_queue_discard() is available])
 		],[
 			ZFS_LINUX_TEST_ERROR([blk_queue_discard])
 		])
 	])
 ])
 
 dnl #
 dnl # 5.19: bdev_max_secure_erase_sectors() available
 dnl # 4.8: blk_queue_secure_erase() available
 dnl # 2.6.36: blk_queue_secdiscard() available
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE], [
 	ZFS_LINUX_TEST_SRC([bdev_max_secure_erase_sectors], [
 		#include <linux/blkdev.h>
 	],[
 		struct block_device *bdev __attribute__ ((unused)) = NULL;
 		unsigned int error __attribute__ ((unused));
 
 		error = bdev_max_secure_erase_sectors(bdev);
 	])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_secure_erase], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue r;
 		struct request_queue *q = &r;
 		int value __attribute__ ((unused));
 		memset(q, 0, sizeof(r));
 		value = blk_queue_secure_erase(q);
 	],[-Wframe-larger-than=8192])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_secdiscard], [
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue r;
 		struct request_queue *q = &r;
 		int value __attribute__ ((unused));
 		memset(q, 0, sizeof(r));
 		value = blk_queue_secdiscard(q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE], [
 	AC_MSG_CHECKING([whether bdev_max_secure_erase_sectors() is available])
 	ZFS_LINUX_TEST_RESULT([bdev_max_secure_erase_sectors], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS, 1,
 		    [bdev_max_secure_erase_sectors() is available])
 	],[
 		AC_MSG_RESULT(no)
 		AC_MSG_CHECKING([whether blk_queue_secure_erase() is available])
 		ZFS_LINUX_TEST_RESULT([blk_queue_secure_erase], [
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_SECURE_ERASE, 1,
 			    [blk_queue_secure_erase() is available])
 		],[
 			AC_MSG_RESULT(no)
 			AC_MSG_CHECKING([whether blk_queue_secdiscard() is available])
 			ZFS_LINUX_TEST_RESULT([blk_queue_secdiscard], [
 				AC_MSG_RESULT(yes)
 				AC_DEFINE(HAVE_BLK_QUEUE_SECDISCARD, 1,
 				    [blk_queue_secdiscard() is available])
 			],[
 				ZFS_LINUX_TEST_ERROR([blk_queue_secure_erase])
 			])
 		])
 	])
 ])
 
 dnl #
 dnl # 4.16 API change,
 dnl # Introduction of blk_queue_flag_set and blk_queue_flag_clear
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flag_set], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q = NULL;
 		blk_queue_flag_set(0, q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET], [
 	AC_MSG_CHECKING([whether blk_queue_flag_set() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flag_set], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLAG_SET, 1,
 		    [blk_queue_flag_set() exists])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flag_clear], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	],[
 		struct request_queue *q = NULL;
 		blk_queue_flag_clear(0, q);
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR], [
 	AC_MSG_CHECKING([whether blk_queue_flag_clear() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flag_clear], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLAG_CLEAR, 1,
 		    [blk_queue_flag_clear() exists])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 dnl #
 dnl # 2.6.36 API change,
 dnl # Added blk_queue_flush() interface, while the previous interface
 dnl # was available to all the new one is GPL-only.  Thus in addition to
 dnl # detecting if this function is available we determine if it is
 dnl # GPL-only.
 dnl # If the GPL-only interface is there we implement our own
 dnl # compatibility function, otherwise we use the function.  The hope
 dnl # is that long term this function will be opened up.
 dnl #
 dnl # 4.7 API change,
 dnl # Replace blk_queue_flush with blk_queue_write_cache
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH], [
 	ZFS_LINUX_TEST_SRC([blk_queue_flush], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		(void) blk_queue_flush(q, REQ_FLUSH);
 	], [], [ZFS_META_LICENSE])
 
 	ZFS_LINUX_TEST_SRC([blk_queue_write_cache], [
 		#include <linux/kernel.h>
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		blk_queue_write_cache(q, true, true);
 	], [], [ZFS_META_LICENSE])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLUSH], [
 	AC_MSG_CHECKING([whether blk_queue_flush() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_flush], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_FLUSH, 1,
 		    [blk_queue_flush() is available])
 
 		AC_MSG_CHECKING([whether blk_queue_flush() is GPL-only])
 		ZFS_LINUX_TEST_RESULT([blk_queue_flush_license], [
 			AC_MSG_RESULT(no)
 		],[
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY, 1,
 			    [blk_queue_flush() is GPL-only])
 		])
 	],[
 		AC_MSG_RESULT(no)
 	])
 
 	dnl #
 	dnl # 4.7 API change
 	dnl # Replace blk_queue_flush with blk_queue_write_cache
 	dnl #
 	AC_MSG_CHECKING([whether blk_queue_write_cache() exists])
 	ZFS_LINUX_TEST_RESULT([blk_queue_write_cache], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE, 1,
 		    [blk_queue_write_cache() exists])
 
 		AC_MSG_CHECKING([whether blk_queue_write_cache() is GPL-only])
 		ZFS_LINUX_TEST_RESULT([blk_queue_write_cache_license], [
 			AC_MSG_RESULT(no)
 		],[
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY, 1,
 			    [blk_queue_write_cache() is GPL-only])
 		])
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 dnl #
 dnl # 2.6.34 API change
 dnl # blk_queue_max_hw_sectors() replaces blk_queue_max_sectors().
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS], [
 	ZFS_LINUX_TEST_SRC([blk_queue_max_hw_sectors], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		(void) blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 	], [])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS], [
 	AC_MSG_CHECKING([whether blk_queue_max_hw_sectors() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_max_hw_sectors], [
 		AC_MSG_RESULT(yes)
 	],[
 		AC_MSG_RESULT(no)
 	])
 ])
 
 dnl #
 dnl # 2.6.34 API change
 dnl # blk_queue_max_segments() consolidates blk_queue_max_hw_segments()
 dnl # and blk_queue_max_phys_segments().
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS], [
 	ZFS_LINUX_TEST_SRC([blk_queue_max_segments], [
 		#include <linux/blkdev.h>
 	], [
 		struct request_queue *q __attribute__ ((unused)) = NULL;
 		(void) blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 	], [])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
 	AC_MSG_CHECKING([whether blk_queue_max_segments() is available])
 	ZFS_LINUX_TEST_RESULT([blk_queue_max_segments], [
 		AC_MSG_RESULT(yes)
 	], [
 		AC_MSG_RESULT(no)
 	])
 ])
 
 dnl #
 dnl # See if kernel supports block multi-queue and blk_status_t.
 dnl # blk_status_t represents the new status codes introduced in the 4.13
 dnl # kernel patch:
 dnl #
 dnl #	block: introduce new block status code type
 dnl #
 dnl # We do not currently support the "old" block multi-queue interfaces from
 dnl # prior kernels.
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
 	ZFS_LINUX_TEST_SRC([blk_mq], [
 		#include <linux/blk-mq.h>
 	], [
 		struct blk_mq_tag_set tag_set __attribute__ ((unused)) = {0};
 		(void) blk_mq_alloc_tag_set(&tag_set);
 		return BLK_STS_OK;
 	], [])
 	ZFS_LINUX_TEST_SRC([blk_mq_rq_hctx], [
 		#include <linux/blk-mq.h>
 		#include <linux/blkdev.h>
 	], [
 		struct request rq = {0};
 		struct blk_mq_hw_ctx *hctx = NULL;
 		rq.mq_hctx = hctx;
 	], [])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
 	AC_MSG_CHECKING([whether block multiqueue with blk_status_t is available])
 	ZFS_LINUX_TEST_RESULT([blk_mq], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE(HAVE_BLK_MQ, 1, [block multiqueue is available])
 		AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
 		ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
 			AC_MSG_RESULT(yes)
 			AC_DEFINE(HAVE_BLK_MQ_RQ_HCTX, 1,
 			    [block multiqueue hardware context is cached in struct request])
 		], [
 			AC_MSG_RESULT(no)
 		])
 	], [
 		AC_MSG_RESULT(no)
 	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI
+	ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISK_BDI
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
 	ZFS_AC_KERNEL_SRC_BLK_MQ
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
 	ZFS_AC_KERNEL_BLK_QUEUE_PLUG
 	ZFS_AC_KERNEL_BLK_QUEUE_BDI
+	ZFS_AC_KERNEL_BLK_QUEUE_DISK_BDI
 	ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD
 	ZFS_AC_KERNEL_BLK_QUEUE_DISCARD
 	ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET
 	ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR
 	ZFS_AC_KERNEL_BLK_QUEUE_FLUSH
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
 	ZFS_AC_KERNEL_BLK_MQ
 ])
diff --git a/include/os/linux/kernel/linux/blkdev_compat.h b/include/os/linux/kernel/linux/blkdev_compat.h
index b7c21f5b317a..c2e818b4d4ee 100644
--- a/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/include/os/linux/kernel/linux/blkdev_compat.h
@@ -1,817 +1,819 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or https://opensource.org/licenses/CDDL-1.0.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 
 /*
  * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
  * LLNL-CODE-403049.
  */
 
 #ifndef _ZFS_BLKDEV_H
 #define	_ZFS_BLKDEV_H
 
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/hdreg.h>
 #include <linux/major.h>
 #include <linux/msdos_fs.h>	/* for SECTOR_* */
 #include <linux/bio.h>
 
 #ifdef HAVE_BLK_MQ
 #include <linux/blk-mq.h>
 #endif
 
 #ifndef HAVE_BLK_QUEUE_FLAG_SET
 static inline void
 blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
 	queue_flag_set(flag, q);
 }
 #endif
 
 #ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
 static inline void
 blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	queue_flag_clear(flag, q);
 }
 #endif
 
 /*
  * 6.11 API
  * Setting the flush flags directly is no longer possible; flush flags are
  * set on the queue_limits structure and passed to blk_alloc_disk(). In this
  * case we remove this function entirely.
  *
  * 4.7 API,
  * The blk_queue_write_cache() interface has replaced blk_queue_flush()
  * interface.  However, the new interface is GPL-only thus we implement
  * our own trivial wrapper when the GPL-only version is detected.
  *
  * 2.6.36 - 4.6 API,
  * The blk_queue_flush() interface has replaced blk_queue_ordered()
  * interface.  However, while the old interface was available to all the
  * new one is GPL-only.  Thus if the GPL-only version is detected we
  * implement our own trivial helper.
  */
 #if !defined(HAVE_BLK_ALLOC_DISK_2ARG) || \
 	!defined(HAVE_BLKDEV_QUEUE_LIMITS_FEATURES)
 static inline void
 blk_queue_set_write_cache(struct request_queue *q, bool on)
 {
 #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
 	if (on) {
 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
 	} else {
 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 	}
 #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
 	blk_queue_write_cache(q, on, on);
 #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
 	if (on)
 		q->flush_flags |= REQ_FLUSH | REQ_FUA;
 	else
 		q->flush_flags &= ~(REQ_FLUSH | REQ_FUA);
 #elif defined(HAVE_BLK_QUEUE_FLUSH)
 	blk_queue_flush(q, on ? (REQ_FLUSH | REQ_FUA) : 0);
 #else
 #error "Unsupported kernel"
 #endif
 }
 #endif /* !HAVE_BLK_ALLOC_DISK_2ARG || !HAVE_BLKDEV_QUEUE_LIMITS_FEATURES */
 
 /*
  * Detect if a device has a write cache. Used to set the initial value for
  * the vdev nowritecache flag.
  *
  * 4.10: QUEUE_FLAG_WC added. Initialised by the driver, but can be changed
  *       later by the operator. If not set, kernel will return flush requests
  *       immediately without doing anything.
  * 6.6: QUEUE_FLAG_HW_WC added. Initialised by the driver, can't be changed.
  *      Only controls if the operator is allowed to change _WC. Initial
  *      version buggy; aliased to QUEUE_FLAG_FUA, so unusable.
  * 6.6.10, 6.7: QUEUE_FLAG_HW_WC fixed.
  *
  * Older than 4.10 we just assume write cache, and let the normal flush fail
  * detection apply.
  */
 static inline boolean_t
 zfs_bdev_has_write_cache(struct block_device *bdev)
 {
 #if defined(QUEUE_FLAG_HW_WC) && QUEUE_FLAG_HW_WC != QUEUE_FLAG_FUA
 	return (test_bit(QUEUE_FLAG_HW_WC, &bdev_get_queue(bdev)->queue_flags));
 #elif defined(QUEUE_FLAG_WC)
 	return (test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags));
 #else
 	return (B_TRUE);
 #endif
 }
 
 static inline void
 blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
 {
 #if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
 	!defined(HAVE_DISK_UPDATE_READAHEAD)
-#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
+#if defined(HAVE_BLK_QUEUE_BDI_DYNAMIC)
 	q->backing_dev_info->ra_pages = ra_pages;
+#elif defined(HAVE_BLK_QUEUE_DISK_BDI)
+	q->disk->bdi->ra_pages = ra_pages;
 #else
 	q->backing_dev_info.ra_pages = ra_pages;
 #endif
 #endif
 }
 
 #ifdef HAVE_BIO_BVEC_ITER
 #define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
 #define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
 #define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
 #define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
 #define	bio_for_each_segment4(bv, bvp, b, i)	\
 	bio_for_each_segment((bv), (b), (i))
 typedef struct bvec_iter bvec_iterator_t;
 #else
 #define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
 #define	BIO_BI_SIZE(bio)	(bio)->bi_size
 #define	BIO_BI_IDX(bio)		(bio)->bi_idx
 #define	BIO_BI_SKIP(bio)	(0)
 #define	bio_for_each_segment4(bv, bvp, b, i)	\
 	bio_for_each_segment((bvp), (b), (i))
 typedef int bvec_iterator_t;
 #endif
 
 static inline void
 bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
     bool transport, bool driver)
 {
 #ifdef CONFIG_BUG
 	/*
 	 * Disable FAILFAST for loopback devices because of the
 	 * following incorrect BUG_ON() in loop_make_request().
 	 * This support is also disabled for md devices because the
 	 * test suite layers md devices on top of loopback devices.
 	 * This may be removed when the loopback driver is fixed.
 	 *
 	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
 	 */
 	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
 	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
 		return;
 
 #ifdef BLOCK_EXT_MAJOR
 	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
 		return;
 #endif /* BLOCK_EXT_MAJOR */
 #endif /* CONFIG_BUG */
 
 	if (dev)
 		*flags |= REQ_FAILFAST_DEV;
 	if (transport)
 		*flags |= REQ_FAILFAST_TRANSPORT;
 	if (driver)
 		*flags |= REQ_FAILFAST_DRIVER;
 }
 
 /*
  * Maximum disk label length; it may be undefined for some kernels.
  */
 #if !defined(DISK_NAME_LEN)
 #define	DISK_NAME_LEN	32
 #endif /* DISK_NAME_LEN */
 
 #ifdef HAVE_BIO_BI_STATUS
 static inline int
 bi_status_to_errno(blk_status_t status)
 {
 	switch (status) {
 	case BLK_STS_OK:
 		return (0);
 	case BLK_STS_NOTSUPP:
 		return (EOPNOTSUPP);
 	case BLK_STS_TIMEOUT:
 		return (ETIMEDOUT);
 	case BLK_STS_NOSPC:
 		return (ENOSPC);
 	case BLK_STS_TRANSPORT:
 		return (ENOLINK);
 	case BLK_STS_TARGET:
 		return (EREMOTEIO);
 #ifdef HAVE_BLK_STS_RESV_CONFLICT
 	case BLK_STS_RESV_CONFLICT:
 #else
 	case BLK_STS_NEXUS:
 #endif
 		return (EBADE);
 	case BLK_STS_MEDIUM:
 		return (ENODATA);
 	case BLK_STS_PROTECTION:
 		return (EILSEQ);
 	case BLK_STS_RESOURCE:
 		return (ENOMEM);
 	case BLK_STS_AGAIN:
 		return (EAGAIN);
 	case BLK_STS_IOERR:
 		return (EIO);
 	default:
 		return (EIO);
 	}
 }
 
 static inline blk_status_t
 errno_to_bi_status(int error)
 {
 	switch (error) {
 	case 0:
 		return (BLK_STS_OK);
 	case EOPNOTSUPP:
 		return (BLK_STS_NOTSUPP);
 	case ETIMEDOUT:
 		return (BLK_STS_TIMEOUT);
 	case ENOSPC:
 		return (BLK_STS_NOSPC);
 	case ENOLINK:
 		return (BLK_STS_TRANSPORT);
 	case EREMOTEIO:
 		return (BLK_STS_TARGET);
 	case EBADE:
 #ifdef HAVE_BLK_STS_RESV_CONFLICT
 		return (BLK_STS_RESV_CONFLICT);
 #else
 		return (BLK_STS_NEXUS);
 #endif
 	case ENODATA:
 		return (BLK_STS_MEDIUM);
 	case EILSEQ:
 		return (BLK_STS_PROTECTION);
 	case ENOMEM:
 		return (BLK_STS_RESOURCE);
 	case EAGAIN:
 		return (BLK_STS_AGAIN);
 	case EIO:
 		return (BLK_STS_IOERR);
 	default:
 		return (BLK_STS_IOERR);
 	}
 }
 #endif /* HAVE_BIO_BI_STATUS */
 
 /*
  * 4.3 API change
  * The bio_endio() prototype changed slightly.  These are helper
  * macros to ensure the prototype and invocation are handled.
  */
 #ifdef HAVE_1ARG_BIO_END_IO_T
 #ifdef HAVE_BIO_BI_STATUS
 #define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
 #define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
 static inline void
 bio_set_bi_status(struct bio *bio, int error)
 {
 	ASSERT3S(error, <=, 0);
 	bio->bi_status = errno_to_bi_status(-error);
 	bio_endio(bio);
 }
 #else
 #define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
 #define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
 static inline void
 bio_set_bi_error(struct bio *bio, int error)
 {
 	ASSERT3S(error, <=, 0);
 	bio->bi_error = error;
 	bio_endio(bio);
 }
 #endif /* HAVE_BIO_BI_STATUS */
 #else
 #define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
 #define	BIO_END_IO(bio, error)		bio_endio(bio, error);
 #endif /* HAVE_1ARG_BIO_END_IO_T */
 
 /*
  * 5.15 MACRO,
  *   GD_DEAD
  *
  * 2.6.36 - 5.14 MACRO,
  *   GENHD_FL_UP
  *
  * Check the disk status and return B_TRUE if alive
  * otherwise B_FALSE
  */
 static inline boolean_t
 zfs_check_disk_status(struct block_device *bdev)
 {
 #if defined(GENHD_FL_UP)
 	return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
 #elif defined(GD_DEAD)
 	return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
 #else
 /*
  * This is encountered if neither GENHD_FL_UP nor GD_DEAD is available in
  * the kernel - likely due to a macro change that needs to be chased down.
  */
 #error "Unsupported kernel: no usable disk status check"
 #endif
 }
 
 /*
  * 4.1 API,
  * 3.10.0 CentOS 7.x API,
  *   blkdev_reread_part()
  *
  * For older kernels trigger a re-reading of the partition table by calling
  * check_disk_change() which calls flush_disk() to invalidate the device.
  *
  * For newer kernels (as of 5.10), bdev_check_media_change is used, in favor
  * of check_disk_change(), with the modification that invalidation is no
  * longer forced.
  */
 #ifdef HAVE_CHECK_DISK_CHANGE
 #define	zfs_check_media_change(bdev)	check_disk_change(bdev)
 #ifdef HAVE_BLKDEV_REREAD_PART
 #define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
 #else
 #define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
 #endif /* HAVE_BLKDEV_REREAD_PART */
 #else
 #ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
 static inline int
 zfs_check_media_change(struct block_device *bdev)
 {
 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
 	struct gendisk *gd = bdev->bd_disk;
 	const struct block_device_operations *bdo = gd->fops;
 #endif
 
 	if (!bdev_check_media_change(bdev))
 		return (0);
 
 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
 	/*
 	 * Force revalidation, to mimic the old behavior of
 	 * check_disk_change()
 	 */
 	if (bdo->revalidate_disk)
 		bdo->revalidate_disk(gd);
 #endif
 
 	return (0);
 }
 #define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
 #elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
 #define	vdev_bdev_reread_part(bdev)	disk_check_media_change(bdev->bd_disk)
 #define	zfs_check_media_change(bdev)	disk_check_media_change(bdev->bd_disk)
 #else
 /*
  * This is encountered if check_disk_change() and bdev_check_media_change()
  * are not available in the kernel - likely due to an API change that needs
  * to be chased down.
  */
 #error "Unsupported kernel: no usable disk change check"
 #endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
 #endif /* HAVE_CHECK_DISK_CHANGE */
 
 /*
  * 2.6.27 API change
  * The function was exported for use, prior to this it existed but the
  * symbol was not exported.
  *
  * 4.4.0-6.21 API change for Ubuntu
  * lookup_bdev() gained a second argument, FMODE_*, to check inode
  * permissions.
  *
  * 5.11 API change
  * Changed to take a dev_t argument which is set on success and return a
  * non-zero error code on failure.
  */
 static inline int
 vdev_lookup_bdev(const char *path, dev_t *dev)
 {
 #if defined(HAVE_DEVT_LOOKUP_BDEV)
 	return (lookup_bdev(path, dev));
 #elif defined(HAVE_1ARG_LOOKUP_BDEV)
 	struct block_device *bdev = lookup_bdev(path);
 	if (IS_ERR(bdev))
 		return (PTR_ERR(bdev));
 
 	*dev = bdev->bd_dev;
 	bdput(bdev);
 
 	return (0);
 #elif defined(HAVE_MODE_LOOKUP_BDEV)
 	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
 	if (IS_ERR(bdev))
 		return (PTR_ERR(bdev));
 
 	*dev = bdev->bd_dev;
 	bdput(bdev);
 
 	return (0);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 #if defined(HAVE_BLK_MODE_T)
 #define	blk_mode_is_open_write(flag)	((flag) & BLK_OPEN_WRITE)
 #else
 #define	blk_mode_is_open_write(flag)	((flag) & FMODE_WRITE)
 #endif
 
 /*
  * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
  */
 #if !defined(HAVE_BIO_SET_OP_ATTRS)
 static inline void
 bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
 {
 #if defined(HAVE_BIO_BI_OPF)
 	bio->bi_opf = rw | flags;
 #else
 	bio->bi_rw |= rw | flags;
 #endif /* HAVE_BIO_BI_OPF */
 }
 #endif
 
 /*
  * bio_set_flush - Set the appropriate flags in a bio to guarantee
  * data are on non-volatile media on completion.
  *
  * 2.6.37 - 4.8 API,
  *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
  *   replacement for WRITE_BARRIER to allow expressing richer semantics
  *   to the block layer.  It's up to the block layer to implement the
  *   semantics correctly.  Use the WRITE_FLUSH_FUA flag combination.
  *
  * 4.8 - 4.9 API,
  *   REQ_FLUSH was renamed to REQ_PREFLUSH.  For consistency with previous
  *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
  *
  * 4.10 API,
  *   The read/write flags and their modifiers, including WRITE_FLUSH,
  *   WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
  *   torvalds/linux@70fd7614 and replaced by direct flag modification
  *   of the REQ_ flags in bio->bi_opf.  Use REQ_PREFLUSH.
  */
 static inline void
 bio_set_flush(struct bio *bio)
 {
 #if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
 	bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
 #elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
 	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
 #else
 #error "Allowing the build will cause bio_set_flush requests to be ignored."
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_FLUSH
  *
  * 4.8-rc0 - 4.8-rc1,
  *   REQ_PREFLUSH
  *
  * 2.6.36 - 4.7 API,
  *   REQ_FLUSH
  *
  * Checking for both the flush op and the preflush flag works in all cases
  * but may have a performance impact for some kernels.  It has the advantage
  * of minimizing kernel specific changes in the zvol code.
  */
 static inline boolean_t
 bio_is_flush(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
 	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
 #elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_opf & REQ_PREFLUSH);
 #elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_rw & REQ_PREFLUSH);
 #elif defined(HAVE_REQ_FLUSH)
 	return (bio->bi_rw & REQ_FLUSH);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_FUA flag moved to bio->bi_opf
  *
  * 2.6.x - 4.7 API,
  *   REQ_FUA
  */
 static inline boolean_t
 bio_is_fua(struct bio *bio)
 {
 #if defined(HAVE_BIO_BI_OPF)
 	return (bio->bi_opf & REQ_FUA);
 #elif defined(REQ_FUA)
 	return (bio->bi_rw & REQ_FUA);
 #else
 #error "Allowing the build will cause fua requests to be ignored."
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_DISCARD
  *
  * 2.6.36 - 4.7 API,
  *   REQ_DISCARD
  *
  * In all cases the normal I/O path is used for discards.  The only
  * difference is how the kernel tags individual I/Os as discards.
  */
 static inline boolean_t
 bio_is_discard(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_DISCARD)
 	return (bio_op(bio) == REQ_OP_DISCARD);
 #elif defined(HAVE_REQ_DISCARD)
 	return (bio->bi_rw & REQ_DISCARD);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * 4.8 API,
  *   REQ_OP_SECURE_ERASE
  *
  * 2.6.36 - 4.7 API,
  *   REQ_SECURE
  */
 static inline boolean_t
 bio_is_secure_erase(struct bio *bio)
 {
 #if defined(HAVE_REQ_OP_SECURE_ERASE)
 	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
 #elif defined(REQ_SECURE)
 	return (bio->bi_rw & REQ_SECURE);
 #else
 	return (0);
 #endif
 }
 
 /*
  * 2.6.33 API change
  * Discard granularity and alignment restrictions may now be set.  For
  * older kernels which do not support this it is safe to skip it.
  */
 static inline void
 blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
 {
 	q->limits.discard_granularity = dg;
 }
 
 /*
  * 5.19 API,
  *   bdev_max_discard_sectors()
  *
  * 2.6.32 API,
  *   blk_queue_discard()
  */
 static inline boolean_t
 bdev_discard_supported(struct block_device *bdev)
 {
 #if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
 	return (bdev_max_discard_sectors(bdev) > 0 &&
 	    bdev_discard_granularity(bdev) > 0);
 #elif defined(HAVE_BLK_QUEUE_DISCARD)
 	return (blk_queue_discard(bdev_get_queue(bdev)) > 0 &&
 	    bdev_get_queue(bdev)->limits.discard_granularity > 0);
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * 5.19 API,
  *   bdev_max_secure_erase_sectors()
  *
  * 4.8 API,
  *   blk_queue_secure_erase()
  *
  * 2.6.36 - 4.7 API,
  *   blk_queue_secdiscard()
  */
 static inline boolean_t
 bdev_secure_discard_supported(struct block_device *bdev)
 {
 #if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
 	return (!!bdev_max_secure_erase_sectors(bdev));
 #elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
 	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
 #elif defined(HAVE_BLK_QUEUE_SECDISCARD)
 	return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
 #else
 #error "Unsupported kernel"
 #endif
 }
 
 /*
  * A common holder for vdev_bdev_open() is used to relax the exclusive open
  * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
  * allow them to open the device multiple times.  Other kernel callers and
  * user space processes which don't pass this value will get EBUSY.  This is
  * currently required for the correct operation of hot spares.
  */
 #define	VDEV_HOLDER			((void *)0x2401de7)
 
 static inline unsigned long
 blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
     struct gendisk *disk __attribute__((unused)),
     int rw __attribute__((unused)), struct bio *bio)
 {
 #if defined(HAVE_BDEV_IO_ACCT_63)
 	return (bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies));
 #elif defined(HAVE_BDEV_IO_ACCT_OLD)
 	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
 	    bio_op(bio), jiffies));
 #elif defined(HAVE_DISK_IO_ACCT)
 	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
 #elif defined(HAVE_BIO_IO_ACCT)
 	return (bio_start_io_acct(bio));
 #elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
 	unsigned long start_time = jiffies;
 	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
 	return (start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
 	unsigned long start_time = jiffies;
 	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
 	return (start_time);
 #else
 	/* Unsupported */
 	return (0);
 #endif
 }
 
 static inline void
 blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
     struct gendisk *disk __attribute__((unused)),
     int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
 {
 #if defined(HAVE_BDEV_IO_ACCT_63)
 	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
 	    start_time);
 #elif defined(HAVE_BDEV_IO_ACCT_OLD)
 	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
 #elif defined(HAVE_DISK_IO_ACCT)
 	disk_end_io_acct(disk, bio_op(bio), start_time);
 #elif defined(HAVE_BIO_IO_ACCT)
 	bio_end_io_acct(bio, start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
 	generic_end_io_acct(rw, &disk->part0, start_time);
 #elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
 	generic_end_io_acct(q, rw, &disk->part0, start_time);
 #endif
 }
 
 #ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
 static inline struct request_queue *
 blk_generic_alloc_queue(make_request_fn make_request, int node_id)
 {
 #if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
 	return (blk_alloc_queue(make_request, node_id));
 #elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
 	return (blk_alloc_queue_rh(make_request, node_id));
 #else
 	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
 	if (q != NULL)
 		blk_queue_make_request(q, make_request);
 
 	return (q);
 #endif
 }
 #endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
 
 /*
  * All the io_*() helper functions below can operate on a bio, or a rq, but
  * not both.  The older submit_bio() codepath will pass a bio, and the
  * newer blk-mq codepath will pass a rq.
  */
 static inline int
 io_data_dir(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL) {
 		if (op_is_write(req_op(rq))) {
 			return (WRITE);
 		} else {
 			return (READ);
 		}
 	}
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_data_dir(bio));
 }
 
 static inline int
 io_is_flush(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_FLUSH);
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_is_flush(bio));
 }
 
 static inline int
 io_is_discard(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_DISCARD);
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_is_discard(bio));
 }
 
 static inline int
 io_is_secure_erase(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_SECURE_ERASE);
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_is_secure_erase(bio));
 }
 
 static inline int
 io_is_fua(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (rq->cmd_flags & REQ_FUA);
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_is_fua(bio));
 }
 
 static inline uint64_t
 io_offset(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (blk_rq_pos(rq) << 9);
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (BIO_BI_SECTOR(bio) << 9);
 }
 
 static inline uint64_t
 io_size(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (blk_rq_bytes(rq));
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (BIO_BI_SIZE(bio));
 }
 
 static inline int
 io_has_data(struct bio *bio, struct request *rq)
 {
 #ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (bio_has_data(rq->bio));
 #else
 	ASSERT3P(rq, ==, NULL);
 #endif
 	return (bio_has_data(bio));
 }
 
 #endif /* _ZFS_BLKDEV_H */
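---

Note for reviewers: the new blk_queue_disk_bdi conftest only has to prove
that the 6.11 access path compiles. A rough standalone equivalent of the
generated test program is sketched below (illustrative only; the actual
ZFS_LINUX_TEST_SRC wrapper embeds this body in module boilerplate and adds
the license tag):

	#include <linux/blkdev.h>
	#include <linux/backing-dev.h>

	/* Compiles only when the bdi is reachable through the queue's gendisk. */
	static void
	conftest_disk_bdi(void)
	{
		struct request_queue q;
		struct gendisk disk;
		struct backing_dev_info bdi __attribute__ ((unused));

		q.disk = &disk;
		q.disk->bdi = &bdi;	/* 6.11: bdi hangs off the gendisk, not the queue */
	}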
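With HAVE_BLK_QUEUE_DISK_BDI defined, blk_queue_set_read_ahead() writes
q->disk->bdi->ra_pages instead of q->backing_dev_info->ra_pages. A hedged
usage sketch follows (zvol_setup_queue() is a hypothetical caller; the
in-tree zvol code makes an equivalent call when it creates the zvol queue):

	static void
	zvol_setup_queue(struct request_queue *q)
	{
		/* One page of readahead; the shim picks the right bdi field. */
		blk_queue_set_read_ahead(q, 1);
	}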